base.c 56 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Procedures for creating, accessing and interpreting the device tree.
  4. *
  5. * Paul Mackerras August 1996.
  6. * Copyright (C) 1996-2005 Paul Mackerras.
  7. *
  8. * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
  9. * {engebret|bergner}@us.ibm.com
  10. *
  11. * Adapted for sparc and sparc64 by David S. Miller [email protected]
  12. *
  13. * Reconsolidated from arch/x/kernel/prom.c by Stephen Rothwell and
  14. * Grant Likely.
  15. */
  16. #define pr_fmt(fmt) "OF: " fmt
  17. #include <linux/console.h>
  18. #include <linux/ctype.h>
  19. #include <linux/cpu.h>
  20. #include <linux/module.h>
  21. #include <linux/of.h>
  22. #include <linux/of_device.h>
  23. #include <linux/of_graph.h>
  24. #include <linux/spinlock.h>
  25. #include <linux/slab.h>
  26. #include <linux/string.h>
  27. #include <linux/proc_fs.h>
  28. #include "of_private.h"
  29. LIST_HEAD(aliases_lookup);
  30. struct device_node *of_root;
  31. EXPORT_SYMBOL(of_root);
  32. struct device_node *of_chosen;
  33. EXPORT_SYMBOL(of_chosen);
  34. struct device_node *of_aliases;
  35. struct device_node *of_stdout;
  36. static const char *of_stdout_options;
  37. struct kset *of_kset;
  38. /*
  39. * Used to protect the of_aliases, to hold off addition of nodes to sysfs.
  40. * This mutex must be held whenever modifications are being made to the
  41. * device tree. The of_{attach,detach}_node() and
  42. * of_{add,remove,update}_property() helpers make sure this happens.
  43. */
  44. DEFINE_MUTEX(of_mutex);
  45. /* use when traversing tree through the child, sibling,
  46. * or parent members of struct device_node.
  47. */
  48. DEFINE_RAW_SPINLOCK(devtree_lock);
  49. bool of_node_name_eq(const struct device_node *np, const char *name)
  50. {
  51. const char *node_name;
  52. size_t len;
  53. if (!np)
  54. return false;
  55. node_name = kbasename(np->full_name);
  56. len = strchrnul(node_name, '@') - node_name;
  57. return (strlen(name) == len) && (strncmp(node_name, name, len) == 0);
  58. }
  59. EXPORT_SYMBOL(of_node_name_eq);
  60. bool of_node_name_prefix(const struct device_node *np, const char *prefix)
  61. {
  62. if (!np)
  63. return false;
  64. return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0;
  65. }
  66. EXPORT_SYMBOL(of_node_name_prefix);
  67. static bool __of_node_is_type(const struct device_node *np, const char *type)
  68. {
  69. const char *match = __of_get_property(np, "device_type", NULL);
  70. return np && match && type && !strcmp(match, type);
  71. }
  72. int of_bus_n_addr_cells(struct device_node *np)
  73. {
  74. u32 cells;
  75. for (; np; np = np->parent)
  76. if (!of_property_read_u32(np, "#address-cells", &cells))
  77. return cells;
  78. /* No #address-cells property for the root node */
  79. return OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
  80. }
  81. int of_n_addr_cells(struct device_node *np)
  82. {
  83. if (np->parent)
  84. np = np->parent;
  85. return of_bus_n_addr_cells(np);
  86. }
  87. EXPORT_SYMBOL(of_n_addr_cells);
  88. int of_bus_n_size_cells(struct device_node *np)
  89. {
  90. u32 cells;
  91. for (; np; np = np->parent)
  92. if (!of_property_read_u32(np, "#size-cells", &cells))
  93. return cells;
  94. /* No #size-cells property for the root node */
  95. return OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
  96. }
  97. int of_n_size_cells(struct device_node *np)
  98. {
  99. if (np->parent)
  100. np = np->parent;
  101. return of_bus_n_size_cells(np);
  102. }
  103. EXPORT_SYMBOL(of_n_size_cells);
  104. #ifdef CONFIG_NUMA
  105. int __weak of_node_to_nid(struct device_node *np)
  106. {
  107. return NUMA_NO_NODE;
  108. }
  109. #endif
  110. #define OF_PHANDLE_CACHE_BITS 7
  111. #define OF_PHANDLE_CACHE_SZ BIT(OF_PHANDLE_CACHE_BITS)
  112. static struct device_node *phandle_cache[OF_PHANDLE_CACHE_SZ];
  113. static u32 of_phandle_cache_hash(phandle handle)
  114. {
  115. return hash_32(handle, OF_PHANDLE_CACHE_BITS);
  116. }
  117. /*
  118. * Caller must hold devtree_lock.
  119. */
  120. void __of_phandle_cache_inv_entry(phandle handle)
  121. {
  122. u32 handle_hash;
  123. struct device_node *np;
  124. if (!handle)
  125. return;
  126. handle_hash = of_phandle_cache_hash(handle);
  127. np = phandle_cache[handle_hash];
  128. if (np && handle == np->phandle)
  129. phandle_cache[handle_hash] = NULL;
  130. }
  131. void __init of_core_init(void)
  132. {
  133. struct device_node *np;
  134. /* Create the kset, and register existing nodes */
  135. mutex_lock(&of_mutex);
  136. of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
  137. if (!of_kset) {
  138. mutex_unlock(&of_mutex);
  139. pr_err("failed to register existing nodes\n");
  140. return;
  141. }
  142. for_each_of_allnodes(np) {
  143. __of_attach_node_sysfs(np);
  144. if (np->phandle && !phandle_cache[of_phandle_cache_hash(np->phandle)])
  145. phandle_cache[of_phandle_cache_hash(np->phandle)] = np;
  146. }
  147. mutex_unlock(&of_mutex);
  148. /* Symlink in /proc as required by userspace ABI */
  149. if (of_root)
  150. proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
  151. }
  152. static struct property *__of_find_property(const struct device_node *np,
  153. const char *name, int *lenp)
  154. {
  155. struct property *pp;
  156. if (!np)
  157. return NULL;
  158. for (pp = np->properties; pp; pp = pp->next) {
  159. if (of_prop_cmp(pp->name, name) == 0) {
  160. if (lenp)
  161. *lenp = pp->length;
  162. break;
  163. }
  164. }
  165. return pp;
  166. }
  167. struct property *of_find_property(const struct device_node *np,
  168. const char *name,
  169. int *lenp)
  170. {
  171. struct property *pp;
  172. unsigned long flags;
  173. raw_spin_lock_irqsave(&devtree_lock, flags);
  174. pp = __of_find_property(np, name, lenp);
  175. raw_spin_unlock_irqrestore(&devtree_lock, flags);
  176. return pp;
  177. }
  178. EXPORT_SYMBOL(of_find_property);
  179. struct device_node *__of_find_all_nodes(struct device_node *prev)
  180. {
  181. struct device_node *np;
  182. if (!prev) {
  183. np = of_root;
  184. } else if (prev->child) {
  185. np = prev->child;
  186. } else {
  187. /* Walk back up looking for a sibling, or the end of the structure */
  188. np = prev;
  189. while (np->parent && !np->sibling)
  190. np = np->parent;
  191. np = np->sibling; /* Might be null at the end of the tree */
  192. }
  193. return np;
  194. }
  195. /**
  196. * of_find_all_nodes - Get next node in global list
  197. * @prev: Previous node or NULL to start iteration
  198. * of_node_put() will be called on it
  199. *
  200. * Return: A node pointer with refcount incremented, use
  201. * of_node_put() on it when done.
  202. */
  203. struct device_node *of_find_all_nodes(struct device_node *prev)
  204. {
  205. struct device_node *np;
  206. unsigned long flags;
  207. raw_spin_lock_irqsave(&devtree_lock, flags);
  208. np = __of_find_all_nodes(prev);
  209. of_node_get(np);
  210. of_node_put(prev);
  211. raw_spin_unlock_irqrestore(&devtree_lock, flags);
  212. return np;
  213. }
  214. EXPORT_SYMBOL(of_find_all_nodes);
  215. /*
  216. * Find a property with a given name for a given node
  217. * and return the value.
  218. */
  219. const void *__of_get_property(const struct device_node *np,
  220. const char *name, int *lenp)
  221. {
  222. struct property *pp = __of_find_property(np, name, lenp);
  223. return pp ? pp->value : NULL;
  224. }
  225. /*
  226. * Find a property with a given name for a given node
  227. * and return the value.
  228. */
  229. const void *of_get_property(const struct device_node *np, const char *name,
  230. int *lenp)
  231. {
  232. struct property *pp = of_find_property(np, name, lenp);
  233. return pp ? pp->value : NULL;
  234. }
  235. EXPORT_SYMBOL(of_get_property);
  236. /**
  237. * of_get_cpu_hwid - Get the hardware ID from a CPU device node
  238. *
  239. * @cpun: CPU number(logical index) for which device node is required
  240. * @thread: The local thread number to get the hardware ID for.
  241. *
  242. * Return: The hardware ID for the CPU node or ~0ULL if not found.
  243. */
  244. u64 of_get_cpu_hwid(struct device_node *cpun, unsigned int thread)
  245. {
  246. const __be32 *cell;
  247. int ac, len;
  248. ac = of_n_addr_cells(cpun);
  249. cell = of_get_property(cpun, "reg", &len);
  250. if (!cell || !ac || ((sizeof(*cell) * ac * (thread + 1)) > len))
  251. return ~0ULL;
  252. cell += ac * thread;
  253. return of_read_number(cell, ac);
  254. }
  255. /*
  256. * arch_match_cpu_phys_id - Match the given logical CPU and physical id
  257. *
  258. * @cpu: logical cpu index of a core/thread
  259. * @phys_id: physical identifier of a core/thread
  260. *
  261. * CPU logical to physical index mapping is architecture specific.
  262. * However this __weak function provides a default match of physical
  263. * id to logical cpu index. phys_id provided here is usually values read
  264. * from the device tree which must match the hardware internal registers.
  265. *
  266. * Returns true if the physical identifier and the logical cpu index
  267. * correspond to the same core/thread, false otherwise.
  268. */
  269. bool __weak arch_match_cpu_phys_id(int cpu, u64 phys_id)
  270. {
  271. return (u32)phys_id == cpu;
  272. }
  273. /*
  274. * Checks if the given "prop_name" property holds the physical id of the
  275. * core/thread corresponding to the logical cpu 'cpu'. If 'thread' is not
  276. * NULL, local thread number within the core is returned in it.
  277. */
  278. static bool __of_find_n_match_cpu_property(struct device_node *cpun,
  279. const char *prop_name, int cpu, unsigned int *thread)
  280. {
  281. const __be32 *cell;
  282. int ac, prop_len, tid;
  283. u64 hwid;
  284. ac = of_n_addr_cells(cpun);
  285. cell = of_get_property(cpun, prop_name, &prop_len);
  286. if (!cell && !ac && arch_match_cpu_phys_id(cpu, 0))
  287. return true;
  288. if (!cell || !ac)
  289. return false;
  290. prop_len /= sizeof(*cell) * ac;
  291. for (tid = 0; tid < prop_len; tid++) {
  292. hwid = of_read_number(cell, ac);
  293. if (arch_match_cpu_phys_id(cpu, hwid)) {
  294. if (thread)
  295. *thread = tid;
  296. return true;
  297. }
  298. cell += ac;
  299. }
  300. return false;
  301. }
  302. /*
  303. * arch_find_n_match_cpu_physical_id - See if the given device node is
  304. * for the cpu corresponding to logical cpu 'cpu'. Return true if so,
  305. * else false. If 'thread' is non-NULL, the local thread number within the
  306. * core is returned in it.
  307. */
  308. bool __weak arch_find_n_match_cpu_physical_id(struct device_node *cpun,
  309. int cpu, unsigned int *thread)
  310. {
  311. /* Check for non-standard "ibm,ppc-interrupt-server#s" property
  312. * for thread ids on PowerPC. If it doesn't exist fallback to
  313. * standard "reg" property.
  314. */
  315. if (IS_ENABLED(CONFIG_PPC) &&
  316. __of_find_n_match_cpu_property(cpun,
  317. "ibm,ppc-interrupt-server#s",
  318. cpu, thread))
  319. return true;
  320. return __of_find_n_match_cpu_property(cpun, "reg", cpu, thread);
  321. }
  322. /**
  323. * of_get_cpu_node - Get device node associated with the given logical CPU
  324. *
  325. * @cpu: CPU number(logical index) for which device node is required
  326. * @thread: if not NULL, local thread number within the physical core is
  327. * returned
  328. *
  329. * The main purpose of this function is to retrieve the device node for the
  330. * given logical CPU index. It should be used to initialize the of_node in
  331. * cpu device. Once of_node in cpu device is populated, all the further
  332. * references can use that instead.
  333. *
  334. * CPU logical to physical index mapping is architecture specific and is built
  335. * before booting secondary cores. This function uses arch_match_cpu_phys_id
  336. * which can be overridden by architecture specific implementation.
  337. *
  338. * Return: A node pointer for the logical cpu with refcount incremented, use
  339. * of_node_put() on it when done. Returns NULL if not found.
  340. */
  341. struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
  342. {
  343. struct device_node *cpun;
  344. for_each_of_cpu_node(cpun) {
  345. if (arch_find_n_match_cpu_physical_id(cpun, cpu, thread))
  346. return cpun;
  347. }
  348. return NULL;
  349. }
  350. EXPORT_SYMBOL(of_get_cpu_node);
  351. /**
  352. * of_cpu_node_to_id: Get the logical CPU number for a given device_node
  353. *
  354. * @cpu_node: Pointer to the device_node for CPU.
  355. *
  356. * Return: The logical CPU number of the given CPU device_node or -ENODEV if the
  357. * CPU is not found.
  358. */
  359. int of_cpu_node_to_id(struct device_node *cpu_node)
  360. {
  361. int cpu;
  362. bool found = false;
  363. struct device_node *np;
  364. for_each_possible_cpu(cpu) {
  365. np = of_cpu_device_node_get(cpu);
  366. found = (cpu_node == np);
  367. of_node_put(np);
  368. if (found)
  369. return cpu;
  370. }
  371. return -ENODEV;
  372. }
  373. EXPORT_SYMBOL(of_cpu_node_to_id);
  374. /**
  375. * of_get_cpu_state_node - Get CPU's idle state node at the given index
  376. *
  377. * @cpu_node: The device node for the CPU
  378. * @index: The index in the list of the idle states
  379. *
  380. * Two generic methods can be used to describe a CPU's idle states, either via
  381. * a flattened description through the "cpu-idle-states" binding or via the
  382. * hierarchical layout, using the "power-domains" and the "domain-idle-states"
  383. * bindings. This function check for both and returns the idle state node for
  384. * the requested index.
  385. *
  386. * Return: An idle state node if found at @index. The refcount is incremented
  387. * for it, so call of_node_put() on it when done. Returns NULL if not found.
  388. */
  389. struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
  390. int index)
  391. {
  392. struct of_phandle_args args;
  393. int err;
  394. err = of_parse_phandle_with_args(cpu_node, "power-domains",
  395. "#power-domain-cells", 0, &args);
  396. if (!err) {
  397. struct device_node *state_node =
  398. of_parse_phandle(args.np, "domain-idle-states", index);
  399. of_node_put(args.np);
  400. if (state_node)
  401. return state_node;
  402. }
  403. return of_parse_phandle(cpu_node, "cpu-idle-states", index);
  404. }
  405. EXPORT_SYMBOL(of_get_cpu_state_node);
  406. /**
  407. * __of_device_is_compatible() - Check if the node matches given constraints
  408. * @device: pointer to node
  409. * @compat: required compatible string, NULL or "" for any match
  410. * @type: required device_type value, NULL or "" for any match
  411. * @name: required node name, NULL or "" for any match
  412. *
  413. * Checks if the given @compat, @type and @name strings match the
  414. * properties of the given @device. A constraints can be skipped by
  415. * passing NULL or an empty string as the constraint.
  416. *
  417. * Returns 0 for no match, and a positive integer on match. The return
  418. * value is a relative score with larger values indicating better
  419. * matches. The score is weighted for the most specific compatible value
  420. * to get the highest score. Matching type is next, followed by matching
  421. * name. Practically speaking, this results in the following priority
  422. * order for matches:
  423. *
  424. * 1. specific compatible && type && name
  425. * 2. specific compatible && type
  426. * 3. specific compatible && name
  427. * 4. specific compatible
  428. * 5. general compatible && type && name
  429. * 6. general compatible && type
  430. * 7. general compatible && name
  431. * 8. general compatible
  432. * 9. type && name
  433. * 10. type
  434. * 11. name
  435. */
  436. static int __of_device_is_compatible(const struct device_node *device,
  437. const char *compat, const char *type, const char *name)
  438. {
  439. struct property *prop;
  440. const char *cp;
  441. int index = 0, score = 0;
  442. /* Compatible match has highest priority */
  443. if (compat && compat[0]) {
  444. prop = __of_find_property(device, "compatible", NULL);
  445. for (cp = of_prop_next_string(prop, NULL); cp;
  446. cp = of_prop_next_string(prop, cp), index++) {
  447. if (of_compat_cmp(cp, compat, strlen(compat)) == 0) {
  448. score = INT_MAX/2 - (index << 2);
  449. break;
  450. }
  451. }
  452. if (!score)
  453. return 0;
  454. }
  455. /* Matching type is better than matching name */
  456. if (type && type[0]) {
  457. if (!__of_node_is_type(device, type))
  458. return 0;
  459. score += 2;
  460. }
  461. /* Matching name is a bit better than not */
  462. if (name && name[0]) {
  463. if (!of_node_name_eq(device, name))
  464. return 0;
  465. score++;
  466. }
  467. return score;
  468. }
  469. /** Checks if the given "compat" string matches one of the strings in
  470. * the device's "compatible" property
  471. */
  472. int of_device_is_compatible(const struct device_node *device,
  473. const char *compat)
  474. {
  475. unsigned long flags;
  476. int res;
  477. raw_spin_lock_irqsave(&devtree_lock, flags);
  478. res = __of_device_is_compatible(device, compat, NULL, NULL);
  479. raw_spin_unlock_irqrestore(&devtree_lock, flags);
  480. return res;
  481. }
  482. EXPORT_SYMBOL(of_device_is_compatible);
  483. /** Checks if the device is compatible with any of the entries in
  484. * a NULL terminated array of strings. Returns the best match
  485. * score or 0.
  486. */
  487. int of_device_compatible_match(const struct device_node *device,
  488. const char *const *compat)
  489. {
  490. unsigned int tmp, score = 0;
  491. if (!compat)
  492. return 0;
  493. while (*compat) {
  494. tmp = of_device_is_compatible(device, *compat);
  495. if (tmp > score)
  496. score = tmp;
  497. compat++;
  498. }
  499. return score;
  500. }
  501. EXPORT_SYMBOL_GPL(of_device_compatible_match);
  502. /**
  503. * of_machine_is_compatible - Test root of device tree for a given compatible value
  504. * @compat: compatible string to look for in root node's compatible property.
  505. *
  506. * Return: A positive integer if the root node has the given value in its
  507. * compatible property.
  508. */
  509. int of_machine_is_compatible(const char *compat)
  510. {
  511. struct device_node *root;
  512. int rc = 0;
  513. root = of_find_node_by_path("/");
  514. if (root) {
  515. rc = of_device_is_compatible(root, compat);
  516. of_node_put(root);
  517. }
  518. return rc;
  519. }
  520. EXPORT_SYMBOL(of_machine_is_compatible);
  521. /**
  522. * __of_device_is_available - check if a device is available for use
  523. *
  524. * @device: Node to check for availability, with locks already held
  525. *
  526. * Return: True if the status property is absent or set to "okay" or "ok",
  527. * false otherwise
  528. */
  529. static bool __of_device_is_available(const struct device_node *device)
  530. {
  531. const char *status;
  532. int statlen;
  533. if (!device)
  534. return false;
  535. status = __of_get_property(device, "status", &statlen);
  536. if (status == NULL)
  537. return true;
  538. if (statlen > 0) {
  539. if (!strcmp(status, "okay") || !strcmp(status, "ok"))
  540. return true;
  541. }
  542. return false;
  543. }
  544. /**
  545. * of_device_is_available - check if a device is available for use
  546. *
  547. * @device: Node to check for availability
  548. *
  549. * Return: True if the status property is absent or set to "okay" or "ok",
  550. * false otherwise
  551. */
  552. bool of_device_is_available(const struct device_node *device)
  553. {
  554. unsigned long flags;
  555. bool res;
  556. raw_spin_lock_irqsave(&devtree_lock, flags);
  557. res = __of_device_is_available(device);
  558. raw_spin_unlock_irqrestore(&devtree_lock, flags);
  559. return res;
  560. }
  561. EXPORT_SYMBOL(of_device_is_available);
  562. /**
  563. * __of_device_is_fail - check if a device has status "fail" or "fail-..."
  564. *
  565. * @device: Node to check status for, with locks already held
  566. *
  567. * Return: True if the status property is set to "fail" or "fail-..." (for any
  568. * error code suffix), false otherwise
  569. */
  570. static bool __of_device_is_fail(const struct device_node *device)
  571. {
  572. const char *status;
  573. if (!device)
  574. return false;
  575. status = __of_get_property(device, "status", NULL);
  576. if (status == NULL)
  577. return false;
  578. return !strcmp(status, "fail") || !strncmp(status, "fail-", 5);
  579. }
  580. /**
  581. * of_device_is_big_endian - check if a device has BE registers
  582. *
  583. * @device: Node to check for endianness
  584. *
  585. * Return: True if the device has a "big-endian" property, or if the kernel
  586. * was compiled for BE *and* the device has a "native-endian" property.
  587. * Returns false otherwise.
  588. *
  589. * Callers would nominally use ioread32be/iowrite32be if
  590. * of_device_is_big_endian() == true, or readl/writel otherwise.
  591. */
  592. bool of_device_is_big_endian(const struct device_node *device)
  593. {
  594. if (of_property_read_bool(device, "big-endian"))
  595. return true;
  596. if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
  597. of_property_read_bool(device, "native-endian"))
  598. return true;
  599. return false;
  600. }
  601. EXPORT_SYMBOL(of_device_is_big_endian);
  602. /**
  603. * of_get_parent - Get a node's parent if any
  604. * @node: Node to get parent
  605. *
  606. * Return: A node pointer with refcount incremented, use
  607. * of_node_put() on it when done.
  608. */
  609. struct device_node *of_get_parent(const struct device_node *node)
  610. {
  611. struct device_node *np;
  612. unsigned long flags;
  613. if (!node)
  614. return NULL;
  615. raw_spin_lock_irqsave(&devtree_lock, flags);
  616. np = of_node_get(node->parent);
  617. raw_spin_unlock_irqrestore(&devtree_lock, flags);
  618. return np;
  619. }
  620. EXPORT_SYMBOL(of_get_parent);
  621. /**
  622. * of_get_next_parent - Iterate to a node's parent
  623. * @node: Node to get parent of
  624. *
  625. * This is like of_get_parent() except that it drops the
  626. * refcount on the passed node, making it suitable for iterating
  627. * through a node's parents.
  628. *
  629. * Return: A node pointer with refcount incremented, use
  630. * of_node_put() on it when done.
  631. */
  632. struct device_node *of_get_next_parent(struct device_node *node)
  633. {
  634. struct device_node *parent;
  635. unsigned long flags;
  636. if (!node)
  637. return NULL;
  638. raw_spin_lock_irqsave(&devtree_lock, flags);
  639. parent = of_node_get(node->parent);
  640. of_node_put(node);
  641. raw_spin_unlock_irqrestore(&devtree_lock, flags);
  642. return parent;
  643. }
  644. EXPORT_SYMBOL(of_get_next_parent);
  645. static struct device_node *__of_get_next_child(const struct device_node *node,
  646. struct device_node *prev)
  647. {
  648. struct device_node *next;
  649. if (!node)
  650. return NULL;
  651. next = prev ? prev->sibling : node->child;
  652. of_node_get(next);
  653. of_node_put(prev);
  654. return next;
  655. }
  656. #define __for_each_child_of_node(parent, child) \
  657. for (child = __of_get_next_child(parent, NULL); child != NULL; \
  658. child = __of_get_next_child(parent, child))
  659. /**
  660. * of_get_next_child - Iterate a node childs
  661. * @node: parent node
  662. * @prev: previous child of the parent node, or NULL to get first
  663. *
  664. * Return: A node pointer with refcount incremented, use of_node_put() on
  665. * it when done. Returns NULL when prev is the last child. Decrements the
  666. * refcount of prev.
  667. */
  668. struct device_node *of_get_next_child(const struct device_node *node,
  669. struct device_node *prev)
  670. {
  671. struct device_node *next;
  672. unsigned long flags;
  673. raw_spin_lock_irqsave(&devtree_lock, flags);
  674. next = __of_get_next_child(node, prev);
  675. raw_spin_unlock_irqrestore(&devtree_lock, flags);
  676. return next;
  677. }
  678. EXPORT_SYMBOL(of_get_next_child);
/**
 * of_get_next_available_child - Find the next available child node
 * @node: parent node
 * @prev: previous child of the parent node, or NULL to get first
 *
 * This function is like of_get_next_child(), except that it
 * automatically skips any disabled nodes (i.e. status = "disabled").
 *
 * Return: A node pointer with refcount incremented, use of_node_put()
 * on it when done. Decrements the refcount of @prev.
 */
struct device_node *of_get_next_available_child(const struct device_node *node,
	struct device_node *prev)
{
	struct device_node *next;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	next = prev ? prev->sibling : node->child;
	for (; next; next = next->sibling) {
		/* Skip children whose status property marks them disabled. */
		if (!__of_device_is_available(next))
			continue;
		/* Take the reference while still under devtree_lock. */
		if (of_node_get(next))
			break;
	}
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_available_child);
/**
 * of_get_next_cpu_node - Iterate on cpu nodes
 * @prev: previous child of the /cpus node, or NULL to get first
 *
 * Unusable CPUs (those with the status property set to "fail" or "fail-...")
 * will be skipped.
 *
 * Return: A cpu node pointer with refcount incremented, use of_node_put()
 * on it when done. Returns NULL when prev is the last child. Decrements
 * the refcount of prev.
 */
struct device_node *of_get_next_cpu_node(struct device_node *prev)
{
	struct device_node *next = NULL;
	unsigned long flags;
	struct device_node *node;

	/*
	 * Resolve "/cpus" before taking devtree_lock: the path lookup
	 * itself takes the lock and must not nest inside it.
	 */
	if (!prev)
		node = of_find_node_by_path("/cpus");

	raw_spin_lock_irqsave(&devtree_lock, flags);
	if (prev)
		next = prev->sibling;
	else if (node) {
		next = node->child;
		of_node_put(node);
	}
	for (; next; next = next->sibling) {
		/* Skip CPUs whose status is "fail"/"fail-...". */
		if (__of_device_is_fail(next))
			continue;
		/* Accept nodes named "cpu" or with device_type "cpu". */
		if (!(of_node_name_eq(next, "cpu") ||
		      __of_node_is_type(next, "cpu")))
			continue;
		/* Take the reference while still under devtree_lock. */
		if (of_node_get(next))
			break;
	}
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_cpu_node);
  746. /**
  747. * of_get_compatible_child - Find compatible child node
  748. * @parent: parent node
  749. * @compatible: compatible string
  750. *
  751. * Lookup child node whose compatible property contains the given compatible
  752. * string.
  753. *
  754. * Return: a node pointer with refcount incremented, use of_node_put() on it
  755. * when done; or NULL if not found.
  756. */
  757. struct device_node *of_get_compatible_child(const struct device_node *parent,
  758. const char *compatible)
  759. {
  760. struct device_node *child;
  761. for_each_child_of_node(parent, child) {
  762. if (of_device_is_compatible(child, compatible))
  763. break;
  764. }
  765. return child;
  766. }
  767. EXPORT_SYMBOL(of_get_compatible_child);
  768. /**
  769. * of_get_child_by_name - Find the child node by name for a given parent
  770. * @node: parent node
  771. * @name: child name to look for.
  772. *
  773. * This function looks for child node for given matching name
  774. *
  775. * Return: A node pointer if found, with refcount incremented, use
  776. * of_node_put() on it when done.
  777. * Returns NULL if node is not found.
  778. */
  779. struct device_node *of_get_child_by_name(const struct device_node *node,
  780. const char *name)
  781. {
  782. struct device_node *child;
  783. for_each_child_of_node(node, child)
  784. if (of_node_name_eq(child, name))
  785. break;
  786. return child;
  787. }
  788. EXPORT_SYMBOL(of_get_child_by_name);
  789. struct device_node *__of_find_node_by_path(struct device_node *parent,
  790. const char *path)
  791. {
  792. struct device_node *child;
  793. int len;
  794. len = strcspn(path, "/:");
  795. if (!len)
  796. return NULL;
  797. __for_each_child_of_node(parent, child) {
  798. const char *name = kbasename(child->full_name);
  799. if (strncmp(path, name, len) == 0 && (strlen(name) == len))
  800. return child;
  801. }
  802. return NULL;
  803. }
/*
 * Walk down @path one '/'-separated component at a time, starting at
 * @node. Consumes (puts) the reference on each intermediate node and
 * returns the final match (or NULL). Descent stops at a ':' options
 * separator. Caller must hold devtree_lock.
 */
struct device_node *__of_find_node_by_full_path(struct device_node *node,
	const char *path)
{
	const char *separator = strchr(path, ':');

	while (node && *path == '/') {
		/* Keep the old node so its reference can be dropped. */
		struct device_node *tmp = node;

		path++; /* Increment past '/' delimiter */
		node = __of_find_node_by_path(node, path);
		of_node_put(tmp);
		path = strchrnul(path, '/');
		/* Anything past the ':' separator is options, not path. */
		if (separator && separator < path)
			break;
	}
	return node;
}
/**
 * of_find_node_opts_by_path - Find a node matching a full OF path
 * @path: Either the full path to match, or if the path does not
 *        start with '/', the name of a property of the /aliases
 *        node (an alias). In the case of an alias, the node
 *        matching the alias' value will be returned.
 * @opts: Address of a pointer into which to store the start of
 *        an options string appended to the end of the path with
 *        a ':' separator.
 *
 * Valid paths:
 *  * /foo/bar	Full path
 *  * foo	Valid alias
 *  * foo/bar	Valid alias + relative path
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_opts_by_path(const char *path, const char **opts)
{
	struct device_node *np = NULL;
	struct property *pp;
	unsigned long flags;
	const char *separator = strchr(path, ':');

	/* Hand back the options substring (text after ':') if requested. */
	if (opts)
		*opts = separator ? separator + 1 : NULL;

	if (strcmp(path, "/") == 0)
		return of_node_get(of_root);

	/* The path could begin with an alias */
	if (*path != '/') {
		int len;
		const char *p = separator;

		/* The alias name ends at ':', '/' or end of string. */
		if (!p)
			p = strchrnul(path, '/');
		len = p - path;

		/* of_aliases must not be NULL */
		if (!of_aliases)
			return NULL;

		/* Resolve the alias to a node via /aliases properties. */
		for_each_property_of_node(of_aliases, pp) {
			if (strlen(pp->name) == len && !strncmp(pp->name, path, len)) {
				np = of_find_node_by_path(pp->value);
				break;
			}
		}
		if (!np)
			return NULL;
		/* Continue with any relative remainder after the alias. */
		path = p;
	}

	/* Step down the tree matching path components */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	if (!np)
		np = of_node_get(of_root);
	np = __of_find_node_by_full_path(np, path);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_opts_by_path);
  876. /**
  877. * of_find_node_by_name - Find a node by its "name" property
  878. * @from: The node to start searching from or NULL; the node
  879. * you pass will not be searched, only the next one
  880. * will. Typically, you pass what the previous call
  881. * returned. of_node_put() will be called on @from.
  882. * @name: The name string to match against
  883. *
  884. * Return: A node pointer with refcount incremented, use
  885. * of_node_put() on it when done.
  886. */
  887. struct device_node *of_find_node_by_name(struct device_node *from,
  888. const char *name)
  889. {
  890. struct device_node *np;
  891. unsigned long flags;
  892. raw_spin_lock_irqsave(&devtree_lock, flags);
  893. for_each_of_allnodes_from(from, np)
  894. if (of_node_name_eq(np, name) && of_node_get(np))
  895. break;
  896. of_node_put(from);
  897. raw_spin_unlock_irqrestore(&devtree_lock, flags);
  898. return np;
  899. }
  900. EXPORT_SYMBOL(of_find_node_by_name);
  901. /**
  902. * of_find_node_by_type - Find a node by its "device_type" property
  903. * @from: The node to start searching from, or NULL to start searching
  904. * the entire device tree. The node you pass will not be
  905. * searched, only the next one will; typically, you pass
  906. * what the previous call returned. of_node_put() will be
  907. * called on from for you.
  908. * @type: The type string to match against
  909. *
  910. * Return: A node pointer with refcount incremented, use
  911. * of_node_put() on it when done.
  912. */
  913. struct device_node *of_find_node_by_type(struct device_node *from,
  914. const char *type)
  915. {
  916. struct device_node *np;
  917. unsigned long flags;
  918. raw_spin_lock_irqsave(&devtree_lock, flags);
  919. for_each_of_allnodes_from(from, np)
  920. if (__of_node_is_type(np, type) && of_node_get(np))
  921. break;
  922. of_node_put(from);
  923. raw_spin_unlock_irqrestore(&devtree_lock, flags);
  924. return np;
  925. }
  926. EXPORT_SYMBOL(of_find_node_by_type);
  927. /**
  928. * of_find_compatible_node - Find a node based on type and one of the
  929. * tokens in its "compatible" property
  930. * @from: The node to start searching from or NULL, the node
  931. * you pass will not be searched, only the next one
  932. * will; typically, you pass what the previous call
  933. * returned. of_node_put() will be called on it
  934. * @type: The type string to match "device_type" or NULL to ignore
  935. * @compatible: The string to match to one of the tokens in the device
  936. * "compatible" list.
  937. *
  938. * Return: A node pointer with refcount incremented, use
  939. * of_node_put() on it when done.
  940. */
  941. struct device_node *of_find_compatible_node(struct device_node *from,
  942. const char *type, const char *compatible)
  943. {
  944. struct device_node *np;
  945. unsigned long flags;
  946. raw_spin_lock_irqsave(&devtree_lock, flags);
  947. for_each_of_allnodes_from(from, np)
  948. if (__of_device_is_compatible(np, compatible, type, NULL) &&
  949. of_node_get(np))
  950. break;
  951. of_node_put(from);
  952. raw_spin_unlock_irqrestore(&devtree_lock, flags);
  953. return np;
  954. }
  955. EXPORT_SYMBOL(of_find_compatible_node);
  956. /**
  957. * of_find_node_with_property - Find a node which has a property with
  958. * the given name.
  959. * @from: The node to start searching from or NULL, the node
  960. * you pass will not be searched, only the next one
  961. * will; typically, you pass what the previous call
  962. * returned. of_node_put() will be called on it
  963. * @prop_name: The name of the property to look for.
  964. *
  965. * Return: A node pointer with refcount incremented, use
  966. * of_node_put() on it when done.
  967. */
  968. struct device_node *of_find_node_with_property(struct device_node *from,
  969. const char *prop_name)
  970. {
  971. struct device_node *np;
  972. struct property *pp;
  973. unsigned long flags;
  974. raw_spin_lock_irqsave(&devtree_lock, flags);
  975. for_each_of_allnodes_from(from, np) {
  976. for (pp = np->properties; pp; pp = pp->next) {
  977. if (of_prop_cmp(pp->name, prop_name) == 0) {
  978. of_node_get(np);
  979. goto out;
  980. }
  981. }
  982. }
  983. out:
  984. of_node_put(from);
  985. raw_spin_unlock_irqrestore(&devtree_lock, flags);
  986. return np;
  987. }
  988. EXPORT_SYMBOL(of_find_node_with_property);
  989. static
  990. const struct of_device_id *__of_match_node(const struct of_device_id *matches,
  991. const struct device_node *node)
  992. {
  993. const struct of_device_id *best_match = NULL;
  994. int score, best_score = 0;
  995. if (!matches)
  996. return NULL;
  997. for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) {
  998. score = __of_device_is_compatible(node, matches->compatible,
  999. matches->type, matches->name);
  1000. if (score > best_score) {
  1001. best_match = matches;
  1002. best_score = score;
  1003. }
  1004. }
  1005. return best_match;
  1006. }
  1007. /**
  1008. * of_match_node - Tell if a device_node has a matching of_match structure
  1009. * @matches: array of of device match structures to search in
  1010. * @node: the of device structure to match against
  1011. *
  1012. * Low level utility function used by device matching.
  1013. */
  1014. const struct of_device_id *of_match_node(const struct of_device_id *matches,
  1015. const struct device_node *node)
  1016. {
  1017. const struct of_device_id *match;
  1018. unsigned long flags;
  1019. raw_spin_lock_irqsave(&devtree_lock, flags);
  1020. match = __of_match_node(matches, node);
  1021. raw_spin_unlock_irqrestore(&devtree_lock, flags);
  1022. return match;
  1023. }
  1024. EXPORT_SYMBOL(of_match_node);
  1025. /**
  1026. * of_find_matching_node_and_match - Find a node based on an of_device_id
  1027. * match table.
  1028. * @from: The node to start searching from or NULL, the node
  1029. * you pass will not be searched, only the next one
  1030. * will; typically, you pass what the previous call
  1031. * returned. of_node_put() will be called on it
  1032. * @matches: array of of device match structures to search in
  1033. * @match: Updated to point at the matches entry which matched
  1034. *
  1035. * Return: A node pointer with refcount incremented, use
  1036. * of_node_put() on it when done.
  1037. */
  1038. struct device_node *of_find_matching_node_and_match(struct device_node *from,
  1039. const struct of_device_id *matches,
  1040. const struct of_device_id **match)
  1041. {
  1042. struct device_node *np;
  1043. const struct of_device_id *m;
  1044. unsigned long flags;
  1045. if (match)
  1046. *match = NULL;
  1047. raw_spin_lock_irqsave(&devtree_lock, flags);
  1048. for_each_of_allnodes_from(from, np) {
  1049. m = __of_match_node(matches, np);
  1050. if (m && of_node_get(np)) {
  1051. if (match)
  1052. *match = m;
  1053. break;
  1054. }
  1055. }
  1056. of_node_put(from);
  1057. raw_spin_unlock_irqrestore(&devtree_lock, flags);
  1058. return np;
  1059. }
  1060. EXPORT_SYMBOL(of_find_matching_node_and_match);
  1061. /**
  1062. * of_modalias_node - Lookup appropriate modalias for a device node
  1063. * @node: pointer to a device tree node
  1064. * @modalias: Pointer to buffer that modalias value will be copied into
  1065. * @len: Length of modalias value
  1066. *
  1067. * Based on the value of the compatible property, this routine will attempt
  1068. * to choose an appropriate modalias value for a particular device tree node.
  1069. * It does this by stripping the manufacturer prefix (as delimited by a ',')
  1070. * from the first entry in the compatible list property.
  1071. *
  1072. * Return: This routine returns 0 on success, <0 on failure.
  1073. */
  1074. int of_modalias_node(struct device_node *node, char *modalias, int len)
  1075. {
  1076. const char *compatible, *p;
  1077. int cplen;
  1078. compatible = of_get_property(node, "compatible", &cplen);
  1079. if (!compatible || strlen(compatible) > cplen)
  1080. return -ENODEV;
  1081. p = strchr(compatible, ',');
  1082. strscpy(modalias, p ? p + 1 : compatible, len);
  1083. return 0;
  1084. }
  1085. EXPORT_SYMBOL_GPL(of_modalias_node);
/**
 * of_find_node_by_phandle - Find a node given a phandle
 * @handle: phandle of the node to find
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_by_phandle(phandle handle)
{
	struct device_node *np = NULL;
	unsigned long flags;
	u32 handle_hash;

	if (!handle)
		return NULL;

	handle_hash = of_phandle_cache_hash(handle);

	raw_spin_lock_irqsave(&devtree_lock, flags);
	/* Fast path: one-entry-per-bucket phandle cache. */
	if (phandle_cache[handle_hash] &&
	    handle == phandle_cache[handle_hash]->phandle)
		np = phandle_cache[handle_hash];
	/* Slow path: full scan, skipping detached nodes; refill cache. */
	if (!np) {
		for_each_of_allnodes(np)
			if (np->phandle == handle &&
			    !of_node_check_flag(np, OF_DETACHED)) {
				phandle_cache[handle_hash] = np;
				break;
			}
	}
	/* np may be NULL here; of_node_get() is used NULL-tolerantly
	 * throughout this file, so a miss returns NULL unchanged. */
	of_node_get(np);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);
  1118. void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
  1119. {
  1120. int i;
  1121. printk("%s %pOF", msg, args->np);
  1122. for (i = 0; i < args->args_count; i++) {
  1123. const char delim = i ? ',' : ':';
  1124. pr_cont("%c%08x", delim, args->args[i]);
  1125. }
  1126. pr_cont("\n");
  1127. }
  1128. int of_phandle_iterator_init(struct of_phandle_iterator *it,
  1129. const struct device_node *np,
  1130. const char *list_name,
  1131. const char *cells_name,
  1132. int cell_count)
  1133. {
  1134. const __be32 *list;
  1135. int size;
  1136. memset(it, 0, sizeof(*it));
  1137. /*
  1138. * one of cell_count or cells_name must be provided to determine the
  1139. * argument length.
  1140. */
  1141. if (cell_count < 0 && !cells_name)
  1142. return -EINVAL;
  1143. list = of_get_property(np, list_name, &size);
  1144. if (!list)
  1145. return -ENOENT;
  1146. it->cells_name = cells_name;
  1147. it->cell_count = cell_count;
  1148. it->parent = np;
  1149. it->list_end = list + size / sizeof(*list);
  1150. it->phandle_end = list;
  1151. it->cur = list;
  1152. return 0;
  1153. }
  1154. EXPORT_SYMBOL_GPL(of_phandle_iterator_init);
/*
 * of_phandle_iterator_next - advance to the next phandle+args entry
 * @it: iterator set up by of_phandle_iterator_init()
 *
 * Drops the reference on the previous entry's target node, decodes the
 * next phandle, determines its argument count and takes a reference on
 * the new target (it->node).
 *
 * Return: 0 on success, -ENOENT when the list is exhausted, -EINVAL on
 * malformed data (unknown phandle, missing cells property, overrun).
 */
int of_phandle_iterator_next(struct of_phandle_iterator *it)
{
	uint32_t count = 0;

	/* Release the node reference held for the previous entry. */
	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	if (!it->cur || it->phandle_end >= it->list_end)
		return -ENOENT;

	it->cur = it->phandle_end;

	/* If phandle is 0, then it is an empty entry with no arguments. */
	it->phandle = be32_to_cpup(it->cur++);

	if (it->phandle) {

		/*
		 * Find the provider node and parse the #*-cells property to
		 * determine the argument length.
		 */
		it->node = of_find_node_by_phandle(it->phandle);

		if (it->cells_name) {
			if (!it->node) {
				pr_err("%pOF: could not find phandle %d\n",
				       it->parent, it->phandle);
				goto err;
			}

			if (of_property_read_u32(it->node, it->cells_name,
						 &count)) {
				/*
				 * If both cell_count and cells_name is given,
				 * fall back to cell_count in absence
				 * of the cells_name property
				 */
				if (it->cell_count >= 0) {
					count = it->cell_count;
				} else {
					pr_err("%pOF: could not get %s for %pOF\n",
					       it->parent,
					       it->cells_name,
					       it->node);
					goto err;
				}
			}
		} else {
			count = it->cell_count;
		}

		/*
		 * Make sure that the arguments actually fit in the remaining
		 * property data length
		 */
		if (it->cur + count > it->list_end) {
			if (it->cells_name)
				pr_err("%pOF: %s = %d found %td\n",
				       it->parent, it->cells_name,
				       count, it->list_end - it->cur);
			else
				pr_err("%pOF: phandle %s needs %d, found %td\n",
				       it->parent, of_node_full_name(it->node),
				       count, it->list_end - it->cur);
			goto err;
		}
	}

	/* Entry decoded: cursor state now covers phandle + count args. */
	it->phandle_end = it->cur + count;
	it->cur_count = count;

	return 0;

err:
	/* On error, drop the node reference before reporting failure. */
	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_next);
  1226. int of_phandle_iterator_args(struct of_phandle_iterator *it,
  1227. uint32_t *args,
  1228. int size)
  1229. {
  1230. int i, count;
  1231. count = it->cur_count;
  1232. if (WARN_ON(size < count))
  1233. count = size;
  1234. for (i = 0; i < count; i++)
  1235. args[i] = be32_to_cpup(it->cur++);
  1236. return count;
  1237. }
/*
 * __of_parse_phandle_with_args - common worker for the phandle parsers
 * @np:		node containing the phandle list property
 * @list_name:	property name holding the list
 * @cells_name:	name of the #*-cells property on targets (may be NULL)
 * @cell_count:	fixed argument count, or < 0 to require cells_name
 * @index:	which entry to extract (0-based)
 * @out_args:	optional; filled with target node and argument values
 *
 * On success, out_args->np holds a reference the caller must drop with
 * of_node_put(). When out_args is NULL, the node reference is dropped
 * here. Returns 0, -ENOENT (empty entry / index out of range) or
 * -EINVAL (malformed data).
 */
int __of_parse_phandle_with_args(const struct device_node *np,
				 const char *list_name,
				 const char *cells_name,
				 int cell_count, int index,
				 struct of_phandle_args *out_args)
{
	struct of_phandle_iterator it;
	int rc, cur_index = 0;

	if (index < 0)
		return -EINVAL;

	/* Loop over the phandles until all the requested entry is found */
	of_for_each_phandle(&it, rc, np, list_name, cells_name, cell_count) {
		/*
		 * All of the error cases bail out of the loop, so at
		 * this point, the parsing is successful. If the requested
		 * index matches, then fill the out_args structure and return,
		 * or return -ENOENT for an empty entry.
		 */
		rc = -ENOENT;
		if (cur_index == index) {
			/* Zero phandle == deliberately empty slot. */
			if (!it.phandle)
				goto err;

			if (out_args) {
				int c;

				c = of_phandle_iterator_args(&it,
							     out_args->args,
							     MAX_PHANDLE_ARGS);
				/* Ownership of it.node moves to out_args. */
				out_args->np = it.node;
				out_args->args_count = c;
			} else {
				of_node_put(it.node);
			}

			/* Found it! return success */
			return 0;
		}

		cur_index++;
	}

	/*
	 * Unlock node before returning result; will be one of:
	 * -ENOENT : index is for empty phandle
	 * -EINVAL : parsing error on data
	 */

err:
	/* it.node is NULL when the loop ended normally; put is a no-op. */
	of_node_put(it.node);
	return rc;
}
EXPORT_SYMBOL(__of_parse_phandle_with_args);
  1285. /**
  1286. * of_parse_phandle_with_args_map() - Find a node pointed by phandle in a list and remap it
  1287. * @np: pointer to a device tree node containing a list
  1288. * @list_name: property name that contains a list
  1289. * @stem_name: stem of property names that specify phandles' arguments count
  1290. * @index: index of a phandle to parse out
  1291. * @out_args: optional pointer to output arguments structure (will be filled)
  1292. *
  1293. * This function is useful to parse lists of phandles and their arguments.
  1294. * Returns 0 on success and fills out_args, on error returns appropriate errno
  1295. * value. The difference between this function and of_parse_phandle_with_args()
  1296. * is that this API remaps a phandle if the node the phandle points to has
  1297. * a <@stem_name>-map property.
  1298. *
  1299. * Caller is responsible to call of_node_put() on the returned out_args->np
  1300. * pointer.
  1301. *
  1302. * Example::
  1303. *
  1304. * phandle1: node1 {
  1305. * #list-cells = <2>;
  1306. * };
  1307. *
  1308. * phandle2: node2 {
  1309. * #list-cells = <1>;
  1310. * };
  1311. *
  1312. * phandle3: node3 {
  1313. * #list-cells = <1>;
  1314. * list-map = <0 &phandle2 3>,
  1315. * <1 &phandle2 2>,
  1316. * <2 &phandle1 5 1>;
  1317. * list-map-mask = <0x3>;
  1318. * };
  1319. *
  1320. * node4 {
  1321. * list = <&phandle1 1 2 &phandle3 0>;
  1322. * };
  1323. *
  1324. * To get a device_node of the ``node2`` node you may call this:
  1325. * of_parse_phandle_with_args(node4, "list", "list", 1, &args);
  1326. */
  1327. int of_parse_phandle_with_args_map(const struct device_node *np,
  1328. const char *list_name,
  1329. const char *stem_name,
  1330. int index, struct of_phandle_args *out_args)
  1331. {
  1332. char *cells_name, *map_name = NULL, *mask_name = NULL;
  1333. char *pass_name = NULL;
  1334. struct device_node *cur, *new = NULL;
  1335. const __be32 *map, *mask, *pass;
  1336. static const __be32 dummy_mask[] = { [0 ... MAX_PHANDLE_ARGS] = ~0 };
  1337. static const __be32 dummy_pass[] = { [0 ... MAX_PHANDLE_ARGS] = 0 };
  1338. __be32 initial_match_array[MAX_PHANDLE_ARGS];
  1339. const __be32 *match_array = initial_match_array;
  1340. int i, ret, map_len, match;
  1341. u32 list_size, new_size;
  1342. if (index < 0)
  1343. return -EINVAL;
  1344. cells_name = kasprintf(GFP_KERNEL, "#%s-cells", stem_name);
  1345. if (!cells_name)
  1346. return -ENOMEM;
  1347. ret = -ENOMEM;
  1348. map_name = kasprintf(GFP_KERNEL, "%s-map", stem_name);
  1349. if (!map_name)
  1350. goto free;
  1351. mask_name = kasprintf(GFP_KERNEL, "%s-map-mask", stem_name);
  1352. if (!mask_name)
  1353. goto free;
  1354. pass_name = kasprintf(GFP_KERNEL, "%s-map-pass-thru", stem_name);
  1355. if (!pass_name)
  1356. goto free;
  1357. ret = __of_parse_phandle_with_args(np, list_name, cells_name, -1, index,
  1358. out_args);
  1359. if (ret)
  1360. goto free;
  1361. /* Get the #<list>-cells property */
  1362. cur = out_args->np;
  1363. ret = of_property_read_u32(cur, cells_name, &list_size);
  1364. if (ret < 0)
  1365. goto put;
  1366. /* Precalculate the match array - this simplifies match loop */
  1367. for (i = 0; i < list_size; i++)
  1368. initial_match_array[i] = cpu_to_be32(out_args->args[i]);
  1369. ret = -EINVAL;
  1370. while (cur) {
  1371. /* Get the <list>-map property */
  1372. map = of_get_property(cur, map_name, &map_len);
  1373. if (!map) {
  1374. ret = 0;
  1375. goto free;
  1376. }
  1377. map_len /= sizeof(u32);
  1378. /* Get the <list>-map-mask property (optional) */
  1379. mask = of_get_property(cur, mask_name, NULL);
  1380. if (!mask)
  1381. mask = dummy_mask;
  1382. /* Iterate through <list>-map property */
  1383. match = 0;
  1384. while (map_len > (list_size + 1) && !match) {
  1385. /* Compare specifiers */
  1386. match = 1;
  1387. for (i = 0; i < list_size; i++, map_len--)
  1388. match &= !((match_array[i] ^ *map++) & mask[i]);
  1389. of_node_put(new);
  1390. new = of_find_node_by_phandle(be32_to_cpup(map));
  1391. map++;
  1392. map_len--;
  1393. /* Check if not found */
  1394. if (!new)
  1395. goto put;
  1396. if (!of_device_is_available(new))
  1397. match = 0;
  1398. ret = of_property_read_u32(new, cells_name, &new_size);
  1399. if (ret)
  1400. goto put;
  1401. /* Check for malformed properties */
  1402. if (WARN_ON(new_size > MAX_PHANDLE_ARGS))
  1403. goto put;
  1404. if (map_len < new_size)
  1405. goto put;
  1406. /* Move forward by new node's #<list>-cells amount */
  1407. map += new_size;
  1408. map_len -= new_size;
  1409. }
  1410. if (!match)
  1411. goto put;
  1412. /* Get the <list>-map-pass-thru property (optional) */
  1413. pass = of_get_property(cur, pass_name, NULL);
  1414. if (!pass)
  1415. pass = dummy_pass;
  1416. /*
  1417. * Successfully parsed a <list>-map translation; copy new
  1418. * specifier into the out_args structure, keeping the
  1419. * bits specified in <list>-map-pass-thru.
  1420. */
  1421. match_array = map - new_size;
  1422. for (i = 0; i < new_size; i++) {
  1423. __be32 val = *(map - new_size + i);
  1424. if (i < list_size) {
  1425. val &= ~pass[i];
  1426. val |= cpu_to_be32(out_args->args[i]) & pass[i];
  1427. }
  1428. out_args->args[i] = be32_to_cpu(val);
  1429. }
  1430. out_args->args_count = list_size = new_size;
  1431. /* Iterate again with new provider */
  1432. out_args->np = new;
  1433. of_node_put(cur);
  1434. cur = new;
  1435. }
  1436. put:
  1437. of_node_put(cur);
  1438. of_node_put(new);
  1439. free:
  1440. kfree(mask_name);
  1441. kfree(map_name);
  1442. kfree(cells_name);
  1443. kfree(pass_name);
  1444. return ret;
  1445. }
  1446. EXPORT_SYMBOL(of_parse_phandle_with_args_map);
  1447. /**
  1448. * of_count_phandle_with_args() - Find the number of phandles references in a property
  1449. * @np: pointer to a device tree node containing a list
  1450. * @list_name: property name that contains a list
  1451. * @cells_name: property name that specifies phandles' arguments count
  1452. *
  1453. * Return: The number of phandle + argument tuples within a property. It
  1454. * is a typical pattern to encode a list of phandle and variable
  1455. * arguments into a single property. The number of arguments is encoded
  1456. * by a property in the phandle-target node. For example, a gpios
  1457. * property would contain a list of GPIO specifies consisting of a
  1458. * phandle and 1 or more arguments. The number of arguments are
  1459. * determined by the #gpio-cells property in the node pointed to by the
  1460. * phandle.
  1461. */
  1462. int of_count_phandle_with_args(const struct device_node *np, const char *list_name,
  1463. const char *cells_name)
  1464. {
  1465. struct of_phandle_iterator it;
  1466. int rc, cur_index = 0;
  1467. /*
  1468. * If cells_name is NULL we assume a cell count of 0. This makes
  1469. * counting the phandles trivial as each 32bit word in the list is a
  1470. * phandle and no arguments are to consider. So we don't iterate through
  1471. * the list but just use the length to determine the phandle count.
  1472. */
  1473. if (!cells_name) {
  1474. const __be32 *list;
  1475. int size;
  1476. list = of_get_property(np, list_name, &size);
  1477. if (!list)
  1478. return -ENOENT;
  1479. return size / sizeof(*list);
  1480. }
  1481. rc = of_phandle_iterator_init(&it, np, list_name, cells_name, -1);
  1482. if (rc)
  1483. return rc;
  1484. while ((rc = of_phandle_iterator_next(&it)) == 0)
  1485. cur_index += 1;
  1486. if (rc != -ENOENT)
  1487. return rc;
  1488. return cur_index;
  1489. }
  1490. EXPORT_SYMBOL(of_count_phandle_with_args);
  1491. /**
  1492. * __of_add_property - Add a property to a node without lock operations
  1493. * @np: Caller's Device Node
  1494. * @prop: Property to add
  1495. */
  1496. int __of_add_property(struct device_node *np, struct property *prop)
  1497. {
  1498. struct property **next;
  1499. prop->next = NULL;
  1500. next = &np->properties;
  1501. while (*next) {
  1502. if (strcmp(prop->name, (*next)->name) == 0)
  1503. /* duplicate ! don't insert it */
  1504. return -EEXIST;
  1505. next = &(*next)->next;
  1506. }
  1507. *next = prop;
  1508. return 0;
  1509. }
  1510. /**
  1511. * of_add_property - Add a property to a node
  1512. * @np: Caller's Device Node
  1513. * @prop: Property to add
  1514. */
  1515. int of_add_property(struct device_node *np, struct property *prop)
  1516. {
  1517. unsigned long flags;
  1518. int rc;
  1519. mutex_lock(&of_mutex);
  1520. raw_spin_lock_irqsave(&devtree_lock, flags);
  1521. rc = __of_add_property(np, prop);
  1522. raw_spin_unlock_irqrestore(&devtree_lock, flags);
  1523. if (!rc)
  1524. __of_add_property_sysfs(np, prop);
  1525. mutex_unlock(&of_mutex);
  1526. if (!rc)
  1527. of_property_notify(OF_RECONFIG_ADD_PROPERTY, np, prop, NULL);
  1528. return rc;
  1529. }
  1530. EXPORT_SYMBOL_GPL(of_add_property);
  1531. int __of_remove_property(struct device_node *np, struct property *prop)
  1532. {
  1533. struct property **next;
  1534. for (next = &np->properties; *next; next = &(*next)->next) {
  1535. if (*next == prop)
  1536. break;
  1537. }
  1538. if (*next == NULL)
  1539. return -ENODEV;
  1540. /* found the node */
  1541. *next = prop->next;
  1542. prop->next = np->deadprops;
  1543. np->deadprops = prop;
  1544. return 0;
  1545. }
  1546. /**
  1547. * of_remove_property - Remove a property from a node.
  1548. * @np: Caller's Device Node
  1549. * @prop: Property to remove
  1550. *
  1551. * Note that we don't actually remove it, since we have given out
  1552. * who-knows-how-many pointers to the data using get-property.
  1553. * Instead we just move the property to the "dead properties"
  1554. * list, so it won't be found any more.
  1555. */
  1556. int of_remove_property(struct device_node *np, struct property *prop)
  1557. {
  1558. unsigned long flags;
  1559. int rc;
  1560. if (!prop)
  1561. return -ENODEV;
  1562. mutex_lock(&of_mutex);
  1563. raw_spin_lock_irqsave(&devtree_lock, flags);
  1564. rc = __of_remove_property(np, prop);
  1565. raw_spin_unlock_irqrestore(&devtree_lock, flags);
  1566. if (!rc)
  1567. __of_remove_property_sysfs(np, prop);
  1568. mutex_unlock(&of_mutex);
  1569. if (!rc)
  1570. of_property_notify(OF_RECONFIG_REMOVE_PROPERTY, np, prop, NULL);
  1571. return rc;
  1572. }
  1573. EXPORT_SYMBOL_GPL(of_remove_property);
  1574. int __of_update_property(struct device_node *np, struct property *newprop,
  1575. struct property **oldpropp)
  1576. {
  1577. struct property **next, *oldprop;
  1578. for (next = &np->properties; *next; next = &(*next)->next) {
  1579. if (of_prop_cmp((*next)->name, newprop->name) == 0)
  1580. break;
  1581. }
  1582. *oldpropp = oldprop = *next;
  1583. if (oldprop) {
  1584. /* replace the node */
  1585. newprop->next = oldprop->next;
  1586. *next = newprop;
  1587. oldprop->next = np->deadprops;
  1588. np->deadprops = oldprop;
  1589. } else {
  1590. /* new node */
  1591. newprop->next = NULL;
  1592. *next = newprop;
  1593. }
  1594. return 0;
  1595. }
/**
 * of_update_property - Update a property in a node, if the property does
 * not exist, add it.
 * @np: Caller's Device Node
 * @newprop: Replacement property (name selects which one to update)
 *
 * Note that we don't actually remove it, since we have given out
 * who-knows-how-many pointers to the data using get-property.
 * Instead we just move the property to the "dead properties" list,
 * and add the new property to the property list
 *
 * Return: 0 on success, -EINVAL if @newprop has no name.
 */
int of_update_property(struct device_node *np, struct property *newprop)
{
	struct property *oldprop;
	unsigned long flags;
	int rc;

	if (!newprop->name)
		return -EINVAL;

	/* of_mutex serialises tree reconfiguration and sysfs updates. */
	mutex_lock(&of_mutex);

	/* devtree_lock protects the property list against concurrent readers. */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	rc = __of_update_property(np, newprop, &oldprop);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	if (!rc)
		__of_update_property_sysfs(np, newprop, oldprop);

	mutex_unlock(&of_mutex);

	/* Notifiers are deliberately invoked without of_mutex held. */
	if (!rc)
		of_property_notify(OF_RECONFIG_UPDATE_PROPERTY, np, newprop, oldprop);

	return rc;
}
  1623. static void of_alias_add(struct alias_prop *ap, struct device_node *np,
  1624. int id, const char *stem, int stem_len)
  1625. {
  1626. ap->np = np;
  1627. ap->id = id;
  1628. strncpy(ap->stem, stem, stem_len);
  1629. ap->stem[stem_len] = 0;
  1630. list_add_tail(&ap->link, &aliases_lookup);
  1631. pr_debug("adding DT alias:%s: stem=%s id=%i node=%pOF\n",
  1632. ap->alias, ap->stem, ap->id, np);
  1633. }
  1634. /**
  1635. * of_alias_scan - Scan all properties of the 'aliases' node
  1636. * @dt_alloc: An allocator that provides a virtual address to memory
  1637. * for storing the resulting tree
  1638. *
  1639. * The function scans all the properties of the 'aliases' node and populates
  1640. * the global lookup table with the properties. It returns the
  1641. * number of alias properties found, or an error code in case of failure.
  1642. */
  1643. void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
  1644. {
  1645. struct property *pp;
  1646. of_aliases = of_find_node_by_path("/aliases");
  1647. of_chosen = of_find_node_by_path("/chosen");
  1648. if (of_chosen == NULL)
  1649. of_chosen = of_find_node_by_path("/chosen@0");
  1650. if (of_chosen) {
  1651. /* linux,stdout-path and /aliases/stdout are for legacy compatibility */
  1652. const char *name = NULL;
  1653. if (of_property_read_string(of_chosen, "stdout-path", &name))
  1654. of_property_read_string(of_chosen, "linux,stdout-path",
  1655. &name);
  1656. if (IS_ENABLED(CONFIG_PPC) && !name)
  1657. of_property_read_string(of_aliases, "stdout", &name);
  1658. if (name)
  1659. of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
  1660. if (of_stdout)
  1661. of_stdout->fwnode.flags |= FWNODE_FLAG_BEST_EFFORT;
  1662. }
  1663. if (!of_aliases)
  1664. return;
  1665. for_each_property_of_node(of_aliases, pp) {
  1666. const char *start = pp->name;
  1667. const char *end = start + strlen(start);
  1668. struct device_node *np;
  1669. struct alias_prop *ap;
  1670. int id, len;
  1671. /* Skip those we do not want to proceed */
  1672. if (!strcmp(pp->name, "name") ||
  1673. !strcmp(pp->name, "phandle") ||
  1674. !strcmp(pp->name, "linux,phandle"))
  1675. continue;
  1676. np = of_find_node_by_path(pp->value);
  1677. if (!np)
  1678. continue;
  1679. /* walk the alias backwards to extract the id and work out
  1680. * the 'stem' string */
  1681. while (isdigit(*(end-1)) && end > start)
  1682. end--;
  1683. len = end - start;
  1684. if (kstrtoint(end, 10, &id) < 0)
  1685. continue;
  1686. /* Allocate an alias_prop with enough space for the stem */
  1687. ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap));
  1688. if (!ap)
  1689. continue;
  1690. memset(ap, 0, sizeof(*ap) + len + 1);
  1691. ap->alias = start;
  1692. of_alias_add(ap, np, id, start, len);
  1693. }
  1694. }
  1695. /**
  1696. * of_alias_get_id - Get alias id for the given device_node
  1697. * @np: Pointer to the given device_node
  1698. * @stem: Alias stem of the given device_node
  1699. *
  1700. * The function travels the lookup table to get the alias id for the given
  1701. * device_node and alias stem.
  1702. *
  1703. * Return: The alias id if found.
  1704. */
  1705. int of_alias_get_id(struct device_node *np, const char *stem)
  1706. {
  1707. struct alias_prop *app;
  1708. int id = -ENODEV;
  1709. mutex_lock(&of_mutex);
  1710. list_for_each_entry(app, &aliases_lookup, link) {
  1711. if (strcmp(app->stem, stem) != 0)
  1712. continue;
  1713. if (np == app->np) {
  1714. id = app->id;
  1715. break;
  1716. }
  1717. }
  1718. mutex_unlock(&of_mutex);
  1719. return id;
  1720. }
  1721. EXPORT_SYMBOL_GPL(of_alias_get_id);
  1722. /**
  1723. * of_alias_get_highest_id - Get highest alias id for the given stem
  1724. * @stem: Alias stem to be examined
  1725. *
  1726. * The function travels the lookup table to get the highest alias id for the
  1727. * given alias stem. It returns the alias id if found.
  1728. */
  1729. int of_alias_get_highest_id(const char *stem)
  1730. {
  1731. struct alias_prop *app;
  1732. int id = -ENODEV;
  1733. mutex_lock(&of_mutex);
  1734. list_for_each_entry(app, &aliases_lookup, link) {
  1735. if (strcmp(app->stem, stem) != 0)
  1736. continue;
  1737. if (app->id > id)
  1738. id = app->id;
  1739. }
  1740. mutex_unlock(&of_mutex);
  1741. return id;
  1742. }
  1743. EXPORT_SYMBOL_GPL(of_alias_get_highest_id);
  1744. /**
  1745. * of_console_check() - Test and setup console for DT setup
  1746. * @dn: Pointer to device node
  1747. * @name: Name to use for preferred console without index. ex. "ttyS"
  1748. * @index: Index to use for preferred console.
  1749. *
  1750. * Check if the given device node matches the stdout-path property in the
  1751. * /chosen node. If it does then register it as the preferred console.
  1752. *
  1753. * Return: TRUE if console successfully setup. Otherwise return FALSE.
  1754. */
  1755. bool of_console_check(struct device_node *dn, char *name, int index)
  1756. {
  1757. if (!dn || dn != of_stdout || console_set_on_cmdline)
  1758. return false;
  1759. /*
  1760. * XXX: cast `options' to char pointer to suppress complication
  1761. * warnings: printk, UART and console drivers expect char pointer.
  1762. */
  1763. return !add_preferred_console(name, index, (char *)of_stdout_options);
  1764. }
  1765. EXPORT_SYMBOL_GPL(of_console_check);
  1766. /**
  1767. * of_find_next_cache_node - Find a node's subsidiary cache
  1768. * @np: node of type "cpu" or "cache"
  1769. *
  1770. * Return: A node pointer with refcount incremented, use
  1771. * of_node_put() on it when done. Caller should hold a reference
  1772. * to np.
  1773. */
  1774. struct device_node *of_find_next_cache_node(const struct device_node *np)
  1775. {
  1776. struct device_node *child, *cache_node;
  1777. cache_node = of_parse_phandle(np, "l2-cache", 0);
  1778. if (!cache_node)
  1779. cache_node = of_parse_phandle(np, "next-level-cache", 0);
  1780. if (cache_node)
  1781. return cache_node;
  1782. /* OF on pmac has nodes instead of properties named "l2-cache"
  1783. * beneath CPU nodes.
  1784. */
  1785. if (IS_ENABLED(CONFIG_PPC_PMAC) && of_node_is_type(np, "cpu"))
  1786. for_each_child_of_node(np, child)
  1787. if (of_node_is_type(child, "cache"))
  1788. return child;
  1789. return NULL;
  1790. }
/**
 * of_find_last_cache_level - Find the level at which the last cache is
 * present for the given logical cpu
 *
 * @cpu: cpu number(logical index) for which the last cache level is needed
 *
 * Return: The level at which the last cache is present. It is exactly
 * same as the total number of cache levels for the given logical cpu.
 */
int of_find_last_cache_level(unsigned int cpu)
{
	u32 cache_level = 0;
	struct device_node *prev = NULL, *np = of_cpu_device_node_get(cpu);

	/*
	 * Follow the cache chain from the CPU node.  Each iteration drops
	 * the reference to the previous node and keeps one on the current,
	 * so when the loop exits, prev holds (a reference to) the last
	 * cache node in the chain.
	 */
	while (np) {
		of_node_put(prev);
		prev = np;
		np = of_find_next_cache_node(np);
	}

	/* cache_level stays 0 if the property is absent or prev is NULL. */
	of_property_read_u32(prev, "cache-level", &cache_level);
	of_node_put(prev);

	return cache_level;
}
/**
 * of_map_id - Translate an ID through a downstream mapping.
 * @np: root complex device node.
 * @id: device ID to map.
 * @map_name: property name of the map to use.
 * @map_mask_name: optional property name of the mask to use.
 * @target: optional pointer to a target device node.
 * @id_out: optional pointer to receive the translated ID.
 *
 * Given a device ID, look up the appropriate implementation-defined
 * platform ID and/or the target device which receives transactions on that
 * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
 * @id_out may be NULL if only the other is required. If @target points to
 * a non-NULL device node pointer, only entries targeting that node will be
 * matched; if it points to a NULL value, it will receive the device node of
 * the first matching target phandle, with a reference held.
 *
 * Return: 0 on success or a standard error code on failure.
 */
int of_map_id(struct device_node *np, u32 id,
	      const char *map_name, const char *map_mask_name,
	      struct device_node **target, u32 *id_out)
{
	u32 map_mask, masked_id;
	int map_len;
	const __be32 *map = NULL;

	if (!np || !map_name || (!target && !id_out))
		return -EINVAL;

	map = of_get_property(np, map_name, &map_len);
	if (!map) {
		/* A caller asking for the target needs a map to find it. */
		if (target)
			return -ENODEV;
		/* Otherwise, no map implies no translation */
		*id_out = id;
		return 0;
	}

	/* The map is a flat array of (id-base, phandle, out-base, len) quads. */
	if (!map_len || map_len % (4 * sizeof(*map))) {
		pr_err("%pOF: Error: Bad %s length: %d\n", np,
		       map_name, map_len);
		return -EINVAL;
	}

	/* The default is to select all bits. */
	map_mask = 0xffffffff;

	/*
	 * Can be overridden by "{iommu,msi}-map-mask" property.
	 * If of_property_read_u32() fails, the default is used.
	 */
	if (map_mask_name)
		of_property_read_u32(np, map_mask_name, &map_mask);

	masked_id = map_mask & id;
	for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
		struct device_node *phandle_node;
		u32 id_base = be32_to_cpup(map + 0);
		u32 phandle = be32_to_cpup(map + 1);
		u32 out_base = be32_to_cpup(map + 2);
		u32 id_len = be32_to_cpup(map + 3);

		/* A base outside the mask can never match a masked ID. */
		if (id_base & ~map_mask) {
			pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores id-base (0x%x)\n",
			       np, map_name, map_name,
			       map_mask, id_base);
			return -EFAULT;
		}

		if (masked_id < id_base || masked_id >= id_base + id_len)
			continue;

		phandle_node = of_find_node_by_phandle(phandle);
		if (!phandle_node)
			return -ENODEV;

		if (target) {
			/*
			 * If the caller pre-set *target, we only keep entries
			 * aimed at that node and drop the extra reference;
			 * otherwise the first match becomes the target (and
			 * keeps its reference).
			 */
			if (*target)
				of_node_put(phandle_node);
			else
				*target = phandle_node;

			if (*target != phandle_node)
				continue;
		}

		if (id_out)
			*id_out = masked_id - id_base + out_base;

		pr_debug("%pOF: %s, using mask %08x, id-base: %08x, out-base: %08x, length: %08x, id: %08x -> %08x\n",
			 np, map_name, map_mask, id_base, out_base,
			 id_len, id, masked_id - id_base + out_base);
		return 0;
	}

	pr_info("%pOF: no %s translation for id 0x%x on %pOF\n", np, map_name,
		id, target && *target ? *target : NULL);

	/* Bypasses translation */
	if (id_out)
		*id_out = id;

	return 0;
}
EXPORT_SYMBOL_GPL(of_map_id);