base.c 51 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Procedures for creating, accessing and interpreting the device tree.
  4. *
  5. * Paul Mackerras August 1996.
  6. * Copyright (C) 1996-2005 Paul Mackerras.
  7. *
  8. * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
  9. * {engebret|bergner}@us.ibm.com
  10. *
  11. * Adapted for sparc and sparc64 by David S. Miller davem@davemloft.net
  12. *
  13. * Reconsolidated from arch/x/kernel/prom.c by Stephen Rothwell and
  14. * Grant Likely.
  15. */
  16. #define pr_fmt(fmt) "OF: " fmt
  17. #include <linux/console.h>
  18. #include <linux/ctype.h>
  19. #include <linux/cpu.h>
  20. #include <linux/module.h>
  21. #include <linux/of.h>
  22. #include <linux/of_device.h>
  23. #include <linux/of_graph.h>
  24. #include <linux/spinlock.h>
  25. #include <linux/slab.h>
  26. #include <linux/string.h>
  27. #include <linux/proc_fs.h>
  28. #include "of_private.h"
/* List of alias_prop entries built from the /aliases node. */
LIST_HEAD(aliases_lookup);

/* Root of the live device tree (set when the DT is unflattened/attached). */
struct device_node *of_root;
EXPORT_SYMBOL(of_root);
/* Well-known nodes cached at boot: /chosen, /aliases, and the stdout node. */
struct device_node *of_chosen;
struct device_node *of_aliases;
struct device_node *of_stdout;
/* Options string (text after ':') from the stdout-path property, if any. */
static const char *of_stdout_options;

/* Parent kset for the /sys/firmware/devicetree sysfs hierarchy. */
struct kset *of_kset;

/*
 * Used to protect the of_aliases, to hold off addition of nodes to sysfs.
 * This mutex must be held whenever modifications are being made to the
 * device tree. The of_{attach,detach}_node() and
 * of_{add,remove,update}_property() helpers make sure this happens.
 */
DEFINE_MUTEX(of_mutex);

/* use when traversing tree through the child, sibling,
 * or parent members of struct device_node.
 */
DEFINE_RAW_SPINLOCK(devtree_lock);
  48. int of_n_addr_cells(struct device_node *np)
  49. {
  50. u32 cells;
  51. do {
  52. if (np->parent)
  53. np = np->parent;
  54. if (!of_property_read_u32(np, "#address-cells", &cells))
  55. return cells;
  56. } while (np->parent);
  57. /* No #address-cells property for the root node */
  58. return OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
  59. }
  60. EXPORT_SYMBOL(of_n_addr_cells);
  61. int of_n_size_cells(struct device_node *np)
  62. {
  63. u32 cells;
  64. do {
  65. if (np->parent)
  66. np = np->parent;
  67. if (!of_property_read_u32(np, "#size-cells", &cells))
  68. return cells;
  69. } while (np->parent);
  70. /* No #size-cells property for the root node */
  71. return OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
  72. }
  73. EXPORT_SYMBOL(of_n_size_cells);
#ifdef CONFIG_NUMA
/* Weak default: no DT-derived NUMA affinity; arch code may override. */
int __weak of_node_to_nid(struct device_node *np)
{
	return NUMA_NO_NODE;
}
#endif
/* Cache of nodes indexed by (phandle & phandle_cache_mask); may be NULL. */
static struct device_node **phandle_cache;
static u32 phandle_cache_mask;

/*
 * Assumptions behind phandle_cache implementation:
 *   - phandle property values are in a contiguous range of 1..n
 *
 * If the assumptions do not hold, then
 *   - the phandle lookup overhead reduction provided by the cache
 *     will likely be less
 */
  90. static void of_populate_phandle_cache(void)
  91. {
  92. unsigned long flags;
  93. u32 cache_entries;
  94. struct device_node *np;
  95. u32 phandles = 0;
  96. raw_spin_lock_irqsave(&devtree_lock, flags);
  97. kfree(phandle_cache);
  98. phandle_cache = NULL;
  99. for_each_of_allnodes(np)
  100. if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
  101. phandles++;
  102. cache_entries = roundup_pow_of_two(phandles);
  103. phandle_cache_mask = cache_entries - 1;
  104. phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
  105. GFP_ATOMIC);
  106. if (!phandle_cache)
  107. goto out;
  108. for_each_of_allnodes(np)
  109. if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
  110. phandle_cache[np->phandle & phandle_cache_mask] = np;
  111. out:
  112. raw_spin_unlock_irqrestore(&devtree_lock, flags);
  113. }
#ifndef CONFIG_MODULES
/*
 * Free the phandle cache at the end of boot.
 * NOTE(review): only done when module loading is disabled — presumably
 * because no late phandle consumers can appear then; confirm against the
 * CONFIG_MODULES users of the cache before changing this.
 */
static int __init of_free_phandle_cache(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	kfree(phandle_cache);
	phandle_cache = NULL;
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	return 0;
}
late_initcall_sync(of_free_phandle_cache);
#endif
/*
 * of_core_init - Boot-time registration of the device tree with sysfs.
 *
 * Builds the phandle cache, creates /sys/firmware/devicetree and attaches
 * every existing node to it, then adds the /proc/device-tree symlink.
 * On kset creation failure the function logs and returns; the live tree
 * itself remains usable without the sysfs representation.
 */
void __init of_core_init(void)
{
	struct device_node *np;

	of_populate_phandle_cache();

	/* Create the kset, and register existing nodes */
	mutex_lock(&of_mutex);
	of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
	if (!of_kset) {
		mutex_unlock(&of_mutex);
		pr_err("failed to register existing nodes\n");
		return;
	}
	for_each_of_allnodes(np)
		__of_attach_node_sysfs(np);
	mutex_unlock(&of_mutex);

	/* Symlink in /proc as required by userspace ABI */
	if (of_root)
		proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
}
  145. static struct property *__of_find_property(const struct device_node *np,
  146. const char *name, int *lenp)
  147. {
  148. struct property *pp;
  149. if (!np)
  150. return NULL;
  151. for (pp = np->properties; pp; pp = pp->next) {
  152. if (of_prop_cmp(pp->name, name) == 0) {
  153. if (lenp)
  154. *lenp = pp->length;
  155. break;
  156. }
  157. }
  158. return pp;
  159. }
  160. struct property *of_find_property(const struct device_node *np,
  161. const char *name,
  162. int *lenp)
  163. {
  164. struct property *pp;
  165. unsigned long flags;
  166. raw_spin_lock_irqsave(&devtree_lock, flags);
  167. pp = __of_find_property(np, name, lenp);
  168. raw_spin_unlock_irqrestore(&devtree_lock, flags);
  169. return pp;
  170. }
  171. EXPORT_SYMBOL(of_find_property);
  172. struct device_node *__of_find_all_nodes(struct device_node *prev)
  173. {
  174. struct device_node *np;
  175. if (!prev) {
  176. np = of_root;
  177. } else if (prev->child) {
  178. np = prev->child;
  179. } else {
  180. /* Walk back up looking for a sibling, or the end of the structure */
  181. np = prev;
  182. while (np->parent && !np->sibling)
  183. np = np->parent;
  184. np = np->sibling; /* Might be null at the end of the tree */
  185. }
  186. return np;
  187. }
  188. /**
  189. * of_find_all_nodes - Get next node in global list
  190. * @prev: Previous node or NULL to start iteration
  191. * of_node_put() will be called on it
  192. *
  193. * Returns a node pointer with refcount incremented, use
  194. * of_node_put() on it when done.
  195. */
  196. struct device_node *of_find_all_nodes(struct device_node *prev)
  197. {
  198. struct device_node *np;
  199. unsigned long flags;
  200. raw_spin_lock_irqsave(&devtree_lock, flags);
  201. np = __of_find_all_nodes(prev);
  202. of_node_get(np);
  203. of_node_put(prev);
  204. raw_spin_unlock_irqrestore(&devtree_lock, flags);
  205. return np;
  206. }
  207. EXPORT_SYMBOL(of_find_all_nodes);
  208. /*
  209. * Find a property with a given name for a given node
  210. * and return the value.
  211. */
  212. const void *__of_get_property(const struct device_node *np,
  213. const char *name, int *lenp)
  214. {
  215. struct property *pp = __of_find_property(np, name, lenp);
  216. return pp ? pp->value : NULL;
  217. }
  218. /*
  219. * Find a property with a given name for a given node
  220. * and return the value.
  221. */
  222. const void *of_get_property(const struct device_node *np, const char *name,
  223. int *lenp)
  224. {
  225. struct property *pp = of_find_property(np, name, lenp);
  226. return pp ? pp->value : NULL;
  227. }
  228. EXPORT_SYMBOL(of_get_property);
  229. /*
  230. * arch_match_cpu_phys_id - Match the given logical CPU and physical id
  231. *
  232. * @cpu: logical cpu index of a core/thread
  233. * @phys_id: physical identifier of a core/thread
  234. *
  235. * CPU logical to physical index mapping is architecture specific.
  236. * However this __weak function provides a default match of physical
  237. * id to logical cpu index. phys_id provided here is usually values read
  238. * from the device tree which must match the hardware internal registers.
  239. *
  240. * Returns true if the physical identifier and the logical cpu index
  241. * correspond to the same core/thread, false otherwise.
  242. */
bool __weak arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	/* Default: low 32 bits of the DT value must equal the logical index */
	return (u32)phys_id == cpu;
}
  247. /**
  248. * Checks if the given "prop_name" property holds the physical id of the
  249. * core/thread corresponding to the logical cpu 'cpu'. If 'thread' is not
  250. * NULL, local thread number within the core is returned in it.
  251. */
  252. static bool __of_find_n_match_cpu_property(struct device_node *cpun,
  253. const char *prop_name, int cpu, unsigned int *thread)
  254. {
  255. const __be32 *cell;
  256. int ac, prop_len, tid;
  257. u64 hwid;
  258. ac = of_n_addr_cells(cpun);
  259. cell = of_get_property(cpun, prop_name, &prop_len);
  260. if (!cell || !ac)
  261. return false;
  262. prop_len /= sizeof(*cell) * ac;
  263. for (tid = 0; tid < prop_len; tid++) {
  264. hwid = of_read_number(cell, ac);
  265. if (arch_match_cpu_phys_id(cpu, hwid)) {
  266. if (thread)
  267. *thread = tid;
  268. return true;
  269. }
  270. cell += ac;
  271. }
  272. return false;
  273. }
  274. /*
  275. * arch_find_n_match_cpu_physical_id - See if the given device node is
  276. * for the cpu corresponding to logical cpu 'cpu'. Return true if so,
  277. * else false. If 'thread' is non-NULL, the local thread number within the
  278. * core is returned in it.
  279. */
  280. bool __weak arch_find_n_match_cpu_physical_id(struct device_node *cpun,
  281. int cpu, unsigned int *thread)
  282. {
  283. /* Check for non-standard "ibm,ppc-interrupt-server#s" property
  284. * for thread ids on PowerPC. If it doesn't exist fallback to
  285. * standard "reg" property.
  286. */
  287. if (IS_ENABLED(CONFIG_PPC) &&
  288. __of_find_n_match_cpu_property(cpun,
  289. "ibm,ppc-interrupt-server#s",
  290. cpu, thread))
  291. return true;
  292. return __of_find_n_match_cpu_property(cpun, "reg", cpu, thread);
  293. }
  294. /**
  295. * of_get_cpu_node - Get device node associated with the given logical CPU
  296. *
  297. * @cpu: CPU number(logical index) for which device node is required
  298. * @thread: if not NULL, local thread number within the physical core is
  299. * returned
  300. *
  301. * The main purpose of this function is to retrieve the device node for the
  302. * given logical CPU index. It should be used to initialize the of_node in
  303. * cpu device. Once of_node in cpu device is populated, all the further
  304. * references can use that instead.
  305. *
  306. * CPU logical to physical index mapping is architecture specific and is built
  307. * before booting secondary cores. This function uses arch_match_cpu_phys_id
  308. * which can be overridden by architecture specific implementation.
  309. *
  310. * Returns a node pointer for the logical cpu with refcount incremented, use
  311. * of_node_put() on it when done. Returns NULL if not found.
  312. */
  313. struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
  314. {
  315. struct device_node *cpun;
  316. for_each_node_by_type(cpun, "cpu") {
  317. if (arch_find_n_match_cpu_physical_id(cpun, cpu, thread))
  318. return cpun;
  319. }
  320. return NULL;
  321. }
  322. EXPORT_SYMBOL(of_get_cpu_node);
  323. /**
  324. * of_cpu_node_to_id: Get the logical CPU number for a given device_node
  325. *
  326. * @cpu_node: Pointer to the device_node for CPU.
  327. *
  328. * Returns the logical CPU number of the given CPU device_node.
  329. * Returns -ENODEV if the CPU is not found.
  330. */
  331. int of_cpu_node_to_id(struct device_node *cpu_node)
  332. {
  333. int cpu;
  334. bool found = false;
  335. struct device_node *np;
  336. for_each_possible_cpu(cpu) {
  337. np = of_cpu_device_node_get(cpu);
  338. found = (cpu_node == np);
  339. of_node_put(np);
  340. if (found)
  341. return cpu;
  342. }
  343. return -ENODEV;
  344. }
  345. EXPORT_SYMBOL(of_cpu_node_to_id);
  346. /**
  347. * __of_device_is_compatible() - Check if the node matches given constraints
  348. * @device: pointer to node
  349. * @compat: required compatible string, NULL or "" for any match
  350. * @type: required device_type value, NULL or "" for any match
  351. * @name: required node name, NULL or "" for any match
  352. *
  353. * Checks if the given @compat, @type and @name strings match the
  354. * properties of the given @device. A constraints can be skipped by
  355. * passing NULL or an empty string as the constraint.
  356. *
  357. * Returns 0 for no match, and a positive integer on match. The return
  358. * value is a relative score with larger values indicating better
  359. * matches. The score is weighted for the most specific compatible value
  360. * to get the highest score. Matching type is next, followed by matching
  361. * name. Practically speaking, this results in the following priority
  362. * order for matches:
  363. *
  364. * 1. specific compatible && type && name
  365. * 2. specific compatible && type
  366. * 3. specific compatible && name
  367. * 4. specific compatible
  368. * 5. general compatible && type && name
  369. * 6. general compatible && type
  370. * 7. general compatible && name
  371. * 8. general compatible
  372. * 9. type && name
  373. * 10. type
  374. * 11. name
  375. */
static int __of_device_is_compatible(const struct device_node *device,
				     const char *compat, const char *type, const char *name)
{
	struct property *prop;
	const char *cp;
	int index = 0, score = 0;

	/* Compatible match has highest priority */
	if (compat && compat[0]) {
		prop = __of_find_property(device, "compatible", NULL);
		/* Walk the string list; earlier entries are more specific */
		for (cp = of_prop_next_string(prop, NULL); cp;
		     cp = of_prop_next_string(prop, cp), index++) {
			if (of_compat_cmp(cp, compat, strlen(compat)) == 0) {
				/* Large base so compatible dominates; -4 per
				 * position keeps room for type/name bonuses */
				score = INT_MAX/2 - (index << 2);
				break;
			}
		}
		/* Constraint given but no entry matched: reject outright */
		if (!score)
			return 0;
	}

	/* Matching type is better than matching name */
	if (type && type[0]) {
		if (!device->type || of_node_cmp(type, device->type))
			return 0;
		score += 2;
	}

	/* Matching name is a bit better than not */
	if (name && name[0]) {
		if (!device->name || of_node_cmp(name, device->name))
			return 0;
		score++;
	}

	return score;
}
  409. /** Checks if the given "compat" string matches one of the strings in
  410. * the device's "compatible" property
  411. */
  412. int of_device_is_compatible(const struct device_node *device,
  413. const char *compat)
  414. {
  415. unsigned long flags;
  416. int res;
  417. raw_spin_lock_irqsave(&devtree_lock, flags);
  418. res = __of_device_is_compatible(device, compat, NULL, NULL);
  419. raw_spin_unlock_irqrestore(&devtree_lock, flags);
  420. return res;
  421. }
  422. EXPORT_SYMBOL(of_device_is_compatible);
  423. /** Checks if the device is compatible with any of the entries in
  424. * a NULL terminated array of strings. Returns the best match
  425. * score or 0.
  426. */
  427. int of_device_compatible_match(struct device_node *device,
  428. const char *const *compat)
  429. {
  430. unsigned int tmp, score = 0;
  431. if (!compat)
  432. return 0;
  433. while (*compat) {
  434. tmp = of_device_is_compatible(device, *compat);
  435. if (tmp > score)
  436. score = tmp;
  437. compat++;
  438. }
  439. return score;
  440. }
  441. /**
  442. * of_machine_is_compatible - Test root of device tree for a given compatible value
  443. * @compat: compatible string to look for in root node's compatible property.
  444. *
  445. * Returns a positive integer if the root node has the given value in its
  446. * compatible property.
  447. */
  448. int of_machine_is_compatible(const char *compat)
  449. {
  450. struct device_node *root;
  451. int rc = 0;
  452. root = of_find_node_by_path("/");
  453. if (root) {
  454. rc = of_device_is_compatible(root, compat);
  455. of_node_put(root);
  456. }
  457. return rc;
  458. }
  459. EXPORT_SYMBOL(of_machine_is_compatible);
  460. /**
  461. * __of_device_is_available - check if a device is available for use
  462. *
  463. * @device: Node to check for availability, with locks already held
  464. *
  465. * Returns true if the status property is absent or set to "okay" or "ok",
  466. * false otherwise
  467. */
  468. static bool __of_device_is_available(const struct device_node *device)
  469. {
  470. const char *status;
  471. int statlen;
  472. if (!device)
  473. return false;
  474. status = __of_get_property(device, "status", &statlen);
  475. if (status == NULL)
  476. return true;
  477. if (statlen > 0) {
  478. if (!strcmp(status, "okay") || !strcmp(status, "ok"))
  479. return true;
  480. }
  481. return false;
  482. }
  483. /**
  484. * of_device_is_available - check if a device is available for use
  485. *
  486. * @device: Node to check for availability
  487. *
  488. * Returns true if the status property is absent or set to "okay" or "ok",
  489. * false otherwise
  490. */
  491. bool of_device_is_available(const struct device_node *device)
  492. {
  493. unsigned long flags;
  494. bool res;
  495. raw_spin_lock_irqsave(&devtree_lock, flags);
  496. res = __of_device_is_available(device);
  497. raw_spin_unlock_irqrestore(&devtree_lock, flags);
  498. return res;
  499. }
  500. EXPORT_SYMBOL(of_device_is_available);
  501. /**
  502. * of_device_is_big_endian - check if a device has BE registers
  503. *
  504. * @device: Node to check for endianness
  505. *
  506. * Returns true if the device has a "big-endian" property, or if the kernel
  507. * was compiled for BE *and* the device has a "native-endian" property.
  508. * Returns false otherwise.
  509. *
  510. * Callers would nominally use ioread32be/iowrite32be if
  511. * of_device_is_big_endian() == true, or readl/writel otherwise.
  512. */
  513. bool of_device_is_big_endian(const struct device_node *device)
  514. {
  515. if (of_property_read_bool(device, "big-endian"))
  516. return true;
  517. if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
  518. of_property_read_bool(device, "native-endian"))
  519. return true;
  520. return false;
  521. }
  522. EXPORT_SYMBOL(of_device_is_big_endian);
  523. /**
  524. * of_get_parent - Get a node's parent if any
  525. * @node: Node to get parent
  526. *
  527. * Returns a node pointer with refcount incremented, use
  528. * of_node_put() on it when done.
  529. */
  530. struct device_node *of_get_parent(const struct device_node *node)
  531. {
  532. struct device_node *np;
  533. unsigned long flags;
  534. if (!node)
  535. return NULL;
  536. raw_spin_lock_irqsave(&devtree_lock, flags);
  537. np = of_node_get(node->parent);
  538. raw_spin_unlock_irqrestore(&devtree_lock, flags);
  539. return np;
  540. }
  541. EXPORT_SYMBOL(of_get_parent);
  542. /**
  543. * of_get_next_parent - Iterate to a node's parent
  544. * @node: Node to get parent of
  545. *
  546. * This is like of_get_parent() except that it drops the
  547. * refcount on the passed node, making it suitable for iterating
  548. * through a node's parents.
  549. *
  550. * Returns a node pointer with refcount incremented, use
  551. * of_node_put() on it when done.
  552. */
  553. struct device_node *of_get_next_parent(struct device_node *node)
  554. {
  555. struct device_node *parent;
  556. unsigned long flags;
  557. if (!node)
  558. return NULL;
  559. raw_spin_lock_irqsave(&devtree_lock, flags);
  560. parent = of_node_get(node->parent);
  561. of_node_put(node);
  562. raw_spin_unlock_irqrestore(&devtree_lock, flags);
  563. return parent;
  564. }
  565. EXPORT_SYMBOL(of_get_next_parent);
  566. static struct device_node *__of_get_next_child(const struct device_node *node,
  567. struct device_node *prev)
  568. {
  569. struct device_node *next;
  570. if (!node)
  571. return NULL;
  572. next = prev ? prev->sibling : node->child;
  573. for (; next; next = next->sibling)
  574. if (of_node_get(next))
  575. break;
  576. of_node_put(prev);
  577. return next;
  578. }
  579. #define __for_each_child_of_node(parent, child) \
  580. for (child = __of_get_next_child(parent, NULL); child != NULL; \
  581. child = __of_get_next_child(parent, child))
  582. /**
  583. * of_get_next_child - Iterate a node childs
  584. * @node: parent node
  585. * @prev: previous child of the parent node, or NULL to get first
  586. *
  587. * Returns a node pointer with refcount incremented, use of_node_put() on
  588. * it when done. Returns NULL when prev is the last child. Decrements the
  589. * refcount of prev.
  590. */
  591. struct device_node *of_get_next_child(const struct device_node *node,
  592. struct device_node *prev)
  593. {
  594. struct device_node *next;
  595. unsigned long flags;
  596. raw_spin_lock_irqsave(&devtree_lock, flags);
  597. next = __of_get_next_child(node, prev);
  598. raw_spin_unlock_irqrestore(&devtree_lock, flags);
  599. return next;
  600. }
  601. EXPORT_SYMBOL(of_get_next_child);
  602. /**
  603. * of_get_next_available_child - Find the next available child node
  604. * @node: parent node
  605. * @prev: previous child of the parent node, or NULL to get first
  606. *
  607. * This function is like of_get_next_child(), except that it
  608. * automatically skips any disabled nodes (i.e. status = "disabled").
  609. */
  610. struct device_node *of_get_next_available_child(const struct device_node *node,
  611. struct device_node *prev)
  612. {
  613. struct device_node *next;
  614. unsigned long flags;
  615. if (!node)
  616. return NULL;
  617. raw_spin_lock_irqsave(&devtree_lock, flags);
  618. next = prev ? prev->sibling : node->child;
  619. for (; next; next = next->sibling) {
  620. if (!__of_device_is_available(next))
  621. continue;
  622. if (of_node_get(next))
  623. break;
  624. }
  625. of_node_put(prev);
  626. raw_spin_unlock_irqrestore(&devtree_lock, flags);
  627. return next;
  628. }
  629. EXPORT_SYMBOL(of_get_next_available_child);
  630. /**
  631. * of_get_child_by_name - Find the child node by name for a given parent
  632. * @node: parent node
  633. * @name: child name to look for.
  634. *
  635. * This function looks for child node for given matching name
  636. *
  637. * Returns a node pointer if found, with refcount incremented, use
  638. * of_node_put() on it when done.
  639. * Returns NULL if node is not found.
  640. */
  641. struct device_node *of_get_child_by_name(const struct device_node *node,
  642. const char *name)
  643. {
  644. struct device_node *child;
  645. for_each_child_of_node(node, child)
  646. if (child->name && (of_node_cmp(child->name, name) == 0))
  647. break;
  648. return child;
  649. }
  650. EXPORT_SYMBOL(of_get_child_by_name);
  651. struct device_node *__of_find_node_by_path(struct device_node *parent,
  652. const char *path)
  653. {
  654. struct device_node *child;
  655. int len;
  656. len = strcspn(path, "/:");
  657. if (!len)
  658. return NULL;
  659. __for_each_child_of_node(parent, child) {
  660. const char *name = kbasename(child->full_name);
  661. if (strncmp(path, name, len) == 0 && (strlen(name) == len))
  662. return child;
  663. }
  664. return NULL;
  665. }
/*
 * Walk a full '/'-separated path starting at @node, one component per
 * iteration. The reference on each intermediate node is dropped as the
 * walk descends. An options suffix (":...") terminates the walk early.
 * Returns the final node (or NULL if any component fails to match).
 */
struct device_node *__of_find_node_by_full_path(struct device_node *node,
						const char *path)
{
	const char *separator = strchr(path, ':');

	while (node && *path == '/') {
		struct device_node *tmp = node;

		path++; /* Increment past '/' delimiter */
		node = __of_find_node_by_path(node, path);
		/* Release the parent; the child now carries the reference */
		of_node_put(tmp);
		path = strchrnul(path, '/');
		/* Stop descending once the options separator is reached */
		if (separator && separator < path)
			break;
	}
	return node;
}
/**
 * of_find_node_opts_by_path - Find a node matching a full OF path
 * @path: Either the full path to match, or if the path does not
 *        start with '/', the name of a property of the /aliases
 *        node (an alias). In the case of an alias, the node
 *        matching the alias' value will be returned.
 * @opts: Address of a pointer into which to store the start of
 *        an options string appended to the end of the path with
 *        a ':' separator.
 *
 * Valid paths:
 *	/foo/bar	Full path
 *	foo		Valid alias
 *	foo/bar		Valid alias + relative path
 *
 * Returns a node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_opts_by_path(const char *path, const char **opts)
{
	struct device_node *np = NULL;
	struct property *pp;
	unsigned long flags;
	const char *separator = strchr(path, ':');

	if (opts)
		*opts = separator ? separator + 1 : NULL;

	if (strcmp(path, "/") == 0)
		return of_node_get(of_root);

	/* The path could begin with an alias */
	if (*path != '/') {
		int len;
		const char *p = separator;

		/* Alias name ends at ':', or at the first '/' if no options */
		if (!p)
			p = strchrnul(path, '/');
		len = p - path;

		/* of_aliases must not be NULL */
		if (!of_aliases)
			return NULL;

		/* Each /aliases property maps an alias name to a full path */
		for_each_property_of_node(of_aliases, pp) {
			if (strlen(pp->name) == len && !strncmp(pp->name, path, len)) {
				np = of_find_node_by_path(pp->value);
				break;
			}
		}
		if (!np)
			return NULL;
		/* Continue matching the remainder of the path (may be empty) */
		path = p;
	}

	/* Step down the tree matching path components */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	if (!np)
		np = of_node_get(of_root);
	np = __of_find_node_by_full_path(np, path);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_opts_by_path);
/**
 * of_find_node_by_name - Find a node by its "name" property
 * @from: The node to start searching from or NULL; the node
 *        you pass will not be searched, only the next one
 *        will. Typically, you pass what the previous call
 *        returned. of_node_put() will be called on @from.
 * @name: The name string to match against
 *
 * Returns a node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_by_name(struct device_node *from,
	const char *name)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	/* of_node_get() in the loop condition takes the reference that
	 * is handed back to the caller */
	for_each_of_allnodes_from(from, np)
		if (np->name && (of_node_cmp(np->name, name) == 0)
		    && of_node_get(np))
			break;
	of_node_put(from);	/* consume the caller's reference on @from */
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_name);
  764. /**
  765. * of_find_node_by_type - Find a node by its "device_type" property
  766. * @from: The node to start searching from, or NULL to start searching
  767. * the entire device tree. The node you pass will not be
  768. * searched, only the next one will; typically, you pass
  769. * what the previous call returned. of_node_put() will be
  770. * called on from for you.
  771. * @type: The type string to match against
  772. *
  773. * Returns a node pointer with refcount incremented, use
  774. * of_node_put() on it when done.
  775. */
  776. struct device_node *of_find_node_by_type(struct device_node *from,
  777. const char *type)
  778. {
  779. struct device_node *np;
  780. unsigned long flags;
  781. raw_spin_lock_irqsave(&devtree_lock, flags);
  782. for_each_of_allnodes_from(from, np)
  783. if (np->type && (of_node_cmp(np->type, type) == 0)
  784. && of_node_get(np))
  785. break;
  786. of_node_put(from);
  787. raw_spin_unlock_irqrestore(&devtree_lock, flags);
  788. return np;
  789. }
  790. EXPORT_SYMBOL(of_find_node_by_type);
/**
 * of_find_compatible_node - Find a node based on type and one of the
 *                           tokens in its "compatible" property
 * @from: The node to start searching from or NULL, the node
 *        you pass will not be searched, only the next one
 *        will; typically, you pass what the previous call
 *        returned. of_node_put() will be called on it
 * @type: The type string to match "device_type" or NULL to ignore
 * @compatible: The string to match to one of the tokens in the device
 *              "compatible" list.
 *
 * Returns a node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_compatible_node(struct device_node *from,
	const char *type, const char *compatible)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	/* of_node_get() in the loop condition takes the reference that
	 * is handed back to the caller */
	for_each_of_allnodes_from(from, np)
		if (__of_device_is_compatible(np, compatible, type, NULL) &&
		    of_node_get(np))
			break;
	of_node_put(from);	/* consume the caller's reference on @from */
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_compatible_node);
  820. /**
  821. * of_find_node_with_property - Find a node which has a property with
  822. * the given name.
  823. * @from: The node to start searching from or NULL, the node
  824. * you pass will not be searched, only the next one
  825. * will; typically, you pass what the previous call
  826. * returned. of_node_put() will be called on it
  827. * @prop_name: The name of the property to look for.
  828. *
  829. * Returns a node pointer with refcount incremented, use
  830. * of_node_put() on it when done.
  831. */
  832. struct device_node *of_find_node_with_property(struct device_node *from,
  833. const char *prop_name)
  834. {
  835. struct device_node *np;
  836. struct property *pp;
  837. unsigned long flags;
  838. raw_spin_lock_irqsave(&devtree_lock, flags);
  839. for_each_of_allnodes_from(from, np) {
  840. for (pp = np->properties; pp; pp = pp->next) {
  841. if (of_prop_cmp(pp->name, prop_name) == 0) {
  842. of_node_get(np);
  843. goto out;
  844. }
  845. }
  846. }
  847. out:
  848. of_node_put(from);
  849. raw_spin_unlock_irqrestore(&devtree_lock, flags);
  850. return np;
  851. }
  852. EXPORT_SYMBOL(of_find_node_with_property);
  853. static
  854. const struct of_device_id *__of_match_node(const struct of_device_id *matches,
  855. const struct device_node *node)
  856. {
  857. const struct of_device_id *best_match = NULL;
  858. int score, best_score = 0;
  859. if (!matches)
  860. return NULL;
  861. for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) {
  862. score = __of_device_is_compatible(node, matches->compatible,
  863. matches->type, matches->name);
  864. if (score > best_score) {
  865. best_match = matches;
  866. best_score = score;
  867. }
  868. }
  869. return best_match;
  870. }
  871. /**
  872. * of_match_node - Tell if a device_node has a matching of_match structure
  873. * @matches: array of of device match structures to search in
  874. * @node: the of device structure to match against
  875. *
  876. * Low level utility function used by device matching.
  877. */
  878. const struct of_device_id *of_match_node(const struct of_device_id *matches,
  879. const struct device_node *node)
  880. {
  881. const struct of_device_id *match;
  882. unsigned long flags;
  883. raw_spin_lock_irqsave(&devtree_lock, flags);
  884. match = __of_match_node(matches, node);
  885. raw_spin_unlock_irqrestore(&devtree_lock, flags);
  886. return match;
  887. }
  888. EXPORT_SYMBOL(of_match_node);
/**
 * of_find_matching_node_and_match - Find a node based on an of_device_id
 *                                   match table.
 * @from: The node to start searching from or NULL, the node
 *        you pass will not be searched, only the next one
 *        will; typically, you pass what the previous call
 *        returned. of_node_put() will be called on it
 * @matches: array of of device match structures to search in
 * @match: Updated to point at the matches entry which matched, or
 *         NULL when no node matched
 *
 * Returns a node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_matching_node_and_match(struct device_node *from,
	const struct of_device_id *matches,
	const struct of_device_id **match)
{
	struct device_node *np;
	const struct of_device_id *m;
	unsigned long flags;

	if (match)
		*match = NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np) {
		m = __of_match_node(matches, np);
		/* of_node_get() takes the reference handed to the caller */
		if (m && of_node_get(np)) {
			if (match)
				*match = m;
			break;
		}
	}
	of_node_put(from);	/* consume the caller's reference on @from */
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_matching_node_and_match);
  925. /**
  926. * of_modalias_node - Lookup appropriate modalias for a device node
  927. * @node: pointer to a device tree node
  928. * @modalias: Pointer to buffer that modalias value will be copied into
  929. * @len: Length of modalias value
  930. *
  931. * Based on the value of the compatible property, this routine will attempt
  932. * to choose an appropriate modalias value for a particular device tree node.
  933. * It does this by stripping the manufacturer prefix (as delimited by a ',')
  934. * from the first entry in the compatible list property.
  935. *
  936. * This routine returns 0 on success, <0 on failure.
  937. */
  938. int of_modalias_node(struct device_node *node, char *modalias, int len)
  939. {
  940. const char *compatible, *p;
  941. int cplen;
  942. compatible = of_get_property(node, "compatible", &cplen);
  943. if (!compatible || strlen(compatible) > cplen)
  944. return -ENODEV;
  945. p = strchr(compatible, ',');
  946. strlcpy(modalias, p ? p + 1 : compatible, len);
  947. return 0;
  948. }
  949. EXPORT_SYMBOL_GPL(of_modalias_node);
/**
 * of_find_node_by_phandle - Find a node given a phandle
 * @handle: phandle of the node to find
 *
 * Returns a node pointer with refcount incremented, use
 * of_node_put() on it when done. Returns NULL for handle 0 or when no
 * node carries the phandle.
 */
struct device_node *of_find_node_by_phandle(phandle handle)
{
	struct device_node *np = NULL;
	unsigned long flags;
	phandle masked_handle;

	if (!handle)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	/* Mask the handle down to a cache slot index */
	masked_handle = handle & phandle_cache_mask;

	if (phandle_cache) {
		/* Fast path: the slot may hold a different phandle that
		 * hashed to the same index, so verify before using it */
		if (phandle_cache[masked_handle] &&
		    handle == phandle_cache[masked_handle]->phandle)
			np = phandle_cache[masked_handle];
	}

	if (!np) {
		/* Cache miss: full tree scan, then repopulate the slot */
		for_each_of_allnodes(np)
			if (np->phandle == handle) {
				if (phandle_cache)
					phandle_cache[masked_handle] = np;
				break;
			}
	}

	/* np may be NULL on a complete miss — of_node_get() presumably
	 * tolerates NULL; TODO confirm against its definition */
	of_node_get(np);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);
  984. void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
  985. {
  986. int i;
  987. printk("%s %pOF", msg, args->np);
  988. for (i = 0; i < args->args_count; i++) {
  989. const char delim = i ? ',' : ':';
  990. pr_cont("%c%08x", delim, args->args[i]);
  991. }
  992. pr_cont("\n");
  993. }
  994. int of_phandle_iterator_init(struct of_phandle_iterator *it,
  995. const struct device_node *np,
  996. const char *list_name,
  997. const char *cells_name,
  998. int cell_count)
  999. {
  1000. const __be32 *list;
  1001. int size;
  1002. memset(it, 0, sizeof(*it));
  1003. list = of_get_property(np, list_name, &size);
  1004. if (!list)
  1005. return -ENOENT;
  1006. it->cells_name = cells_name;
  1007. it->cell_count = cell_count;
  1008. it->parent = np;
  1009. it->list_end = list + size / sizeof(*list);
  1010. it->phandle_end = list;
  1011. it->cur = list;
  1012. return 0;
  1013. }
  1014. EXPORT_SYMBOL_GPL(of_phandle_iterator_init);
/*
 * of_phandle_iterator_next - Advance @it to the next phandle+args entry
 *
 * Drops the reference held on the previous entry's target node, decodes
 * the next phandle and the number of argument cells that follow it, and
 * takes a reference on the new target (it->node).
 *
 * Returns 0 on success, -ENOENT when the list is exhausted, -EINVAL on
 * a malformed entry (in which case no node reference is retained).
 */
int of_phandle_iterator_next(struct of_phandle_iterator *it)
{
	uint32_t count = 0;

	/* Release the node taken on the previous iteration */
	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	if (!it->cur || it->phandle_end >= it->list_end)
		return -ENOENT;

	it->cur = it->phandle_end;

	/* If phandle is 0, then it is an empty entry with no arguments. */
	it->phandle = be32_to_cpup(it->cur++);

	if (it->phandle) {

		/*
		 * Find the provider node and parse the #*-cells property to
		 * determine the argument length.
		 */
		it->node = of_find_node_by_phandle(it->phandle);

		if (it->cells_name) {
			if (!it->node) {
				pr_err("%pOF: could not find phandle\n",
				       it->parent);
				goto err;
			}

			if (of_property_read_u32(it->node, it->cells_name,
						 &count)) {
				pr_err("%pOF: could not get %s for %pOF\n",
				       it->parent,
				       it->cells_name,
				       it->node);
				goto err;
			}
		} else {
			/* No cells property: use the fixed per-entry count */
			count = it->cell_count;
		}

		/*
		 * Make sure that the arguments actually fit in the remaining
		 * property data length
		 */
		if (it->cur + count > it->list_end) {
			pr_err("%pOF: arguments longer than property\n",
			       it->parent);
			goto err;
		}
	}

	it->phandle_end = it->cur + count;
	it->cur_count = count;

	return 0;

err:
	/* Error path must not leak the reference taken above */
	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_next);
  1071. int of_phandle_iterator_args(struct of_phandle_iterator *it,
  1072. uint32_t *args,
  1073. int size)
  1074. {
  1075. int i, count;
  1076. count = it->cur_count;
  1077. if (WARN_ON(size < count))
  1078. count = size;
  1079. for (i = 0; i < count; i++)
  1080. args[i] = be32_to_cpup(it->cur++);
  1081. return count;
  1082. }
/*
 * __of_parse_phandle_with_args - Common worker behind the phandle parsers
 * @np:		node containing the phandle list property
 * @list_name:	property name that contains the list
 * @cells_name:	property naming each target's argument count, or NULL to
 *		use @cell_count for every entry
 * @cell_count:	fixed argument count used when @cells_name is NULL
 * @index:	zero-based entry to extract
 * @out_args:	if non-NULL, receives the target node (reference held by
 *		the caller afterwards) and its arguments; if NULL, the
 *		entry's node reference is dropped immediately
 *
 * Returns 0 on success, -ENOENT when @index is out of range or names an
 * empty (phandle == 0) entry, -EINVAL on malformed data.
 */
static int __of_parse_phandle_with_args(const struct device_node *np,
	const char *list_name,
	const char *cells_name,
	int cell_count, int index,
	struct of_phandle_args *out_args)
{
	struct of_phandle_iterator it;
	int rc, cur_index = 0;

	/* Loop over the phandles until the requested entry is found */
	of_for_each_phandle(&it, rc, np, list_name, cells_name, cell_count) {
		/*
		 * All of the error cases bail out of the loop, so at
		 * this point, the parsing is successful. If the requested
		 * index matches, then fill the out_args structure and return,
		 * or return -ENOENT for an empty entry.
		 */
		rc = -ENOENT;
		if (cur_index == index) {
			if (!it.phandle)
				goto err;

			if (out_args) {
				int c;

				c = of_phandle_iterator_args(&it,
							     out_args->args,
							     MAX_PHANDLE_ARGS);
				/* Hand the iterator's node reference over
				 * to out_args->np */
				out_args->np = it.node;
				out_args->args_count = c;
			} else {
				of_node_put(it.node);
			}

			/* Found it! return success */
			return 0;
		}

		cur_index++;
	}

	/*
	 * Unlock node before returning result; will be one of:
	 * -ENOENT : index is for empty phandle
	 * -EINVAL : parsing error on data
	 */
err:
	of_node_put(it.node);
	return rc;
}
  1127. /**
  1128. * of_parse_phandle - Resolve a phandle property to a device_node pointer
  1129. * @np: Pointer to device node holding phandle property
  1130. * @phandle_name: Name of property holding a phandle value
  1131. * @index: For properties holding a table of phandles, this is the index into
  1132. * the table
  1133. *
  1134. * Returns the device_node pointer with refcount incremented. Use
  1135. * of_node_put() on it when done.
  1136. */
  1137. struct device_node *of_parse_phandle(const struct device_node *np,
  1138. const char *phandle_name, int index)
  1139. {
  1140. struct of_phandle_args args;
  1141. if (index < 0)
  1142. return NULL;
  1143. if (__of_parse_phandle_with_args(np, phandle_name, NULL, 0,
  1144. index, &args))
  1145. return NULL;
  1146. return args.np;
  1147. }
  1148. EXPORT_SYMBOL(of_parse_phandle);
  1149. /**
  1150. * of_parse_phandle_with_args() - Find a node pointed by phandle in a list
  1151. * @np: pointer to a device tree node containing a list
  1152. * @list_name: property name that contains a list
  1153. * @cells_name: property name that specifies phandles' arguments count
  1154. * @index: index of a phandle to parse out
  1155. * @out_args: optional pointer to output arguments structure (will be filled)
  1156. *
  1157. * This function is useful to parse lists of phandles and their arguments.
  1158. * Returns 0 on success and fills out_args, on error returns appropriate
  1159. * errno value.
  1160. *
  1161. * Caller is responsible to call of_node_put() on the returned out_args->np
  1162. * pointer.
  1163. *
  1164. * Example:
  1165. *
  1166. * phandle1: node1 {
  1167. * #list-cells = <2>;
  1168. * }
  1169. *
  1170. * phandle2: node2 {
  1171. * #list-cells = <1>;
  1172. * }
  1173. *
  1174. * node3 {
  1175. * list = <&phandle1 1 2 &phandle2 3>;
  1176. * }
  1177. *
  1178. * To get a device_node of the `node2' node you may call this:
  1179. * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
  1180. */
  1181. int of_parse_phandle_with_args(const struct device_node *np, const char *list_name,
  1182. const char *cells_name, int index,
  1183. struct of_phandle_args *out_args)
  1184. {
  1185. if (index < 0)
  1186. return -EINVAL;
  1187. return __of_parse_phandle_with_args(np, list_name, cells_name, 0,
  1188. index, out_args);
  1189. }
  1190. EXPORT_SYMBOL(of_parse_phandle_with_args);
/**
 * of_parse_phandle_with_args_map() - Find a node pointed by phandle in a list and remap it
 * @np: pointer to a device tree node containing a list
 * @list_name: property name that contains a list
 * @stem_name: stem of property names that specify phandles' arguments count
 * @index: index of a phandle to parse out
 * @out_args: optional pointer to output arguments structure (will be filled)
 *
 * This function is useful to parse lists of phandles and their arguments.
 * Returns 0 on success and fills out_args, on error returns appropriate errno
 * value. The difference between this function and of_parse_phandle_with_args()
 * is that this API remaps a phandle if the node the phandle points to has
 * a <@stem_name>-map property.
 *
 * Caller is responsible to call of_node_put() on the returned out_args->np
 * pointer.
 *
 * Example:
 *
 * phandle1: node1 {
 *	#list-cells = <2>;
 * }
 *
 * phandle2: node2 {
 *	#list-cells = <1>;
 * }
 *
 * phandle3: node3 {
 *	#list-cells = <1>;
 *	list-map = <0 &phandle2 3>,
 *		   <1 &phandle2 2>,
 *		   <2 &phandle1 5 1>;
 *	list-map-mask = <0x3>;
 * };
 *
 * node4 {
 *	list = <&phandle1 1 2 &phandle3 0>;
 * }
 *
 * To get a device_node of the `node2' node you may call this:
 * of_parse_phandle_with_args_map(node4, "list", "list", 1, &args);
 */
int of_parse_phandle_with_args_map(const struct device_node *np,
	const char *list_name,
	const char *stem_name,
	int index, struct of_phandle_args *out_args)
{
	char *cells_name, *map_name = NULL, *mask_name = NULL;
	char *pass_name = NULL;
	struct device_node *cur, *new = NULL;
	const __be32 *map, *mask, *pass;
	/* Default mask compares all cells; default pass-thru passes none */
	static const __be32 dummy_mask[] = { [0 ... MAX_PHANDLE_ARGS] = ~0 };
	static const __be32 dummy_pass[] = { [0 ... MAX_PHANDLE_ARGS] = 0 };
	__be32 initial_match_array[MAX_PHANDLE_ARGS];
	const __be32 *match_array = initial_match_array;
	int i, ret, map_len, match;
	u32 list_size, new_size;

	if (index < 0)
		return -EINVAL;

	/* Build the "#<stem>-cells", "<stem>-map", "<stem>-map-mask" and
	 * "<stem>-map-pass-thru" property names */
	cells_name = kasprintf(GFP_KERNEL, "#%s-cells", stem_name);
	if (!cells_name)
		return -ENOMEM;

	ret = -ENOMEM;
	map_name = kasprintf(GFP_KERNEL, "%s-map", stem_name);
	if (!map_name)
		goto free;

	mask_name = kasprintf(GFP_KERNEL, "%s-map-mask", stem_name);
	if (!mask_name)
		goto free;

	pass_name = kasprintf(GFP_KERNEL, "%s-map-pass-thru", stem_name);
	if (!pass_name)
		goto free;

	/* Resolve the un-remapped entry first */
	ret = __of_parse_phandle_with_args(np, list_name, cells_name, 0, index,
					   out_args);
	if (ret)
		goto free;

	/* Get the #<list>-cells property */
	cur = out_args->np;
	ret = of_property_read_u32(cur, cells_name, &list_size);
	if (ret < 0)
		goto put;

	/* Precalculate the match array - this simplifies match loop */
	for (i = 0; i < list_size; i++)
		initial_match_array[i] = cpu_to_be32(out_args->args[i]);

	ret = -EINVAL;
	/* Follow the chain of <list>-map translations until a provider
	 * without a map property is reached */
	while (cur) {
		/* Get the <list>-map property */
		map = of_get_property(cur, map_name, &map_len);
		if (!map) {
			/* No map on this node: translation is complete */
			ret = 0;
			goto free;
		}
		map_len /= sizeof(u32);

		/* Get the <list>-map-mask property (optional) */
		mask = of_get_property(cur, mask_name, NULL);
		if (!mask)
			mask = dummy_mask;
		/* Iterate through <list>-map property */
		match = 0;
		while (map_len > (list_size + 1) && !match) {
			/* Compare specifiers */
			match = 1;
			for (i = 0; i < list_size; i++, map_len--)
				match &= !((match_array[i] ^ *map++) & mask[i]);

			of_node_put(new);
			new = of_find_node_by_phandle(be32_to_cpup(map));
			map++;
			map_len--;

			/* Check if not found */
			if (!new)
				goto put;

			if (!of_device_is_available(new))
				match = 0;

			ret = of_property_read_u32(new, cells_name, &new_size);
			if (ret)
				goto put;

			/* Check for malformed properties */
			if (WARN_ON(new_size > MAX_PHANDLE_ARGS))
				goto put;
			if (map_len < new_size)
				goto put;

			/* Move forward by new node's #<list>-cells amount */
			map += new_size;
			map_len -= new_size;
		}
		if (!match)
			goto put;

		/* Get the <list>-map-pass-thru property (optional) */
		pass = of_get_property(cur, pass_name, NULL);
		if (!pass)
			pass = dummy_pass;

		/*
		 * Successfully parsed a <list>-map translation; copy new
		 * specifier into the out_args structure, keeping the
		 * bits specified in <list>-map-pass-thru.
		 */
		match_array = map - new_size;
		for (i = 0; i < new_size; i++) {
			__be32 val = *(map - new_size + i);

			if (i < list_size) {
				val &= ~pass[i];
				val |= cpu_to_be32(out_args->args[i]) & pass[i];
			}

			out_args->args[i] = be32_to_cpu(val);
		}
		out_args->args_count = list_size = new_size;
		/* Iterate again with new provider */
		out_args->np = new;
		of_node_put(cur);
		cur = new;
	}
put:
	of_node_put(cur);
	of_node_put(new);
free:
	kfree(mask_name);
	kfree(map_name);
	kfree(cells_name);
	kfree(pass_name);
	return ret;
}
EXPORT_SYMBOL(of_parse_phandle_with_args_map);
  1353. /**
  1354. * of_parse_phandle_with_fixed_args() - Find a node pointed by phandle in a list
  1355. * @np: pointer to a device tree node containing a list
  1356. * @list_name: property name that contains a list
  1357. * @cell_count: number of argument cells following the phandle
  1358. * @index: index of a phandle to parse out
  1359. * @out_args: optional pointer to output arguments structure (will be filled)
  1360. *
  1361. * This function is useful to parse lists of phandles and their arguments.
  1362. * Returns 0 on success and fills out_args, on error returns appropriate
  1363. * errno value.
  1364. *
  1365. * Caller is responsible to call of_node_put() on the returned out_args->np
  1366. * pointer.
  1367. *
  1368. * Example:
  1369. *
  1370. * phandle1: node1 {
  1371. * }
  1372. *
  1373. * phandle2: node2 {
  1374. * }
  1375. *
  1376. * node3 {
  1377. * list = <&phandle1 0 2 &phandle2 2 3>;
  1378. * }
  1379. *
  1380. * To get a device_node of the `node2' node you may call this:
  1381. * of_parse_phandle_with_fixed_args(node3, "list", 2, 1, &args);
  1382. */
  1383. int of_parse_phandle_with_fixed_args(const struct device_node *np,
  1384. const char *list_name, int cell_count,
  1385. int index, struct of_phandle_args *out_args)
  1386. {
  1387. if (index < 0)
  1388. return -EINVAL;
  1389. return __of_parse_phandle_with_args(np, list_name, NULL, cell_count,
  1390. index, out_args);
  1391. }
  1392. EXPORT_SYMBOL(of_parse_phandle_with_fixed_args);
  1393. /**
  1394. * of_count_phandle_with_args() - Find the number of phandles references in a property
  1395. * @np: pointer to a device tree node containing a list
  1396. * @list_name: property name that contains a list
  1397. * @cells_name: property name that specifies phandles' arguments count
  1398. *
  1399. * Returns the number of phandle + argument tuples within a property. It
  1400. * is a typical pattern to encode a list of phandle and variable
  1401. * arguments into a single property. The number of arguments is encoded
  1402. * by a property in the phandle-target node. For example, a gpios
  1403. * property would contain a list of GPIO specifies consisting of a
  1404. * phandle and 1 or more arguments. The number of arguments are
  1405. * determined by the #gpio-cells property in the node pointed to by the
  1406. * phandle.
  1407. */
  1408. int of_count_phandle_with_args(const struct device_node *np, const char *list_name,
  1409. const char *cells_name)
  1410. {
  1411. struct of_phandle_iterator it;
  1412. int rc, cur_index = 0;
  1413. rc = of_phandle_iterator_init(&it, np, list_name, cells_name, 0);
  1414. if (rc)
  1415. return rc;
  1416. while ((rc = of_phandle_iterator_next(&it)) == 0)
  1417. cur_index += 1;
  1418. if (rc != -ENOENT)
  1419. return rc;
  1420. return cur_index;
  1421. }
  1422. EXPORT_SYMBOL(of_count_phandle_with_args);
  1423. /**
  1424. * __of_add_property - Add a property to a node without lock operations
  1425. */
  1426. int __of_add_property(struct device_node *np, struct property *prop)
  1427. {
  1428. struct property **next;
  1429. prop->next = NULL;
  1430. next = &np->properties;
  1431. while (*next) {
  1432. if (strcmp(prop->name, (*next)->name) == 0)
  1433. /* duplicate ! don't insert it */
  1434. return -EEXIST;
  1435. next = &(*next)->next;
  1436. }
  1437. *next = prop;
  1438. return 0;
  1439. }
/**
 * of_add_property - Add a property to a node
 *
 * Locked wrapper around __of_add_property(): takes of_mutex and
 * devtree_lock, mirrors the property into sysfs on success and raises an
 * OF_RECONFIG_ADD_PROPERTY notification after dropping of_mutex.
 */
int of_add_property(struct device_node *np, struct property *prop)
{
	unsigned long flags;
	int rc;

	mutex_lock(&of_mutex);

	raw_spin_lock_irqsave(&devtree_lock, flags);
	rc = __of_add_property(np, prop);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	if (!rc)
		__of_add_property_sysfs(np, prop);

	mutex_unlock(&of_mutex);

	/* Notifiers are called without of_mutex held */
	if (!rc)
		of_property_notify(OF_RECONFIG_ADD_PROPERTY, np, prop, NULL);

	return rc;
}
  1458. int __of_remove_property(struct device_node *np, struct property *prop)
  1459. {
  1460. struct property **next;
  1461. for (next = &np->properties; *next; next = &(*next)->next) {
  1462. if (*next == prop)
  1463. break;
  1464. }
  1465. if (*next == NULL)
  1466. return -ENODEV;
  1467. /* found the node */
  1468. *next = prop->next;
  1469. prop->next = np->deadprops;
  1470. np->deadprops = prop;
  1471. return 0;
  1472. }
/**
 * of_remove_property - Remove a property from a node.
 *
 * Note that we don't actually remove it, since we have given out
 * who-knows-how-many pointers to the data using get-property.
 * Instead we just move the property to the "dead properties"
 * list, so it won't be found any more.
 *
 * Returns -ENODEV if @prop is NULL or not attached to @np; on success
 * the sysfs entry is removed and an OF_RECONFIG_REMOVE_PROPERTY
 * notification is raised after of_mutex is dropped.
 */
int of_remove_property(struct device_node *np, struct property *prop)
{
	unsigned long flags;
	int rc;

	if (!prop)
		return -ENODEV;

	mutex_lock(&of_mutex);

	raw_spin_lock_irqsave(&devtree_lock, flags);
	rc = __of_remove_property(np, prop);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	if (!rc)
		__of_remove_property_sysfs(np, prop);

	mutex_unlock(&of_mutex);

	if (!rc)
		of_property_notify(OF_RECONFIG_REMOVE_PROPERTY, np, prop, NULL);

	return rc;
}
  1498. int __of_update_property(struct device_node *np, struct property *newprop,
  1499. struct property **oldpropp)
  1500. {
  1501. struct property **next, *oldprop;
  1502. for (next = &np->properties; *next; next = &(*next)->next) {
  1503. if (of_prop_cmp((*next)->name, newprop->name) == 0)
  1504. break;
  1505. }
  1506. *oldpropp = oldprop = *next;
  1507. if (oldprop) {
  1508. /* replace the node */
  1509. newprop->next = oldprop->next;
  1510. *next = newprop;
  1511. oldprop->next = np->deadprops;
  1512. np->deadprops = oldprop;
  1513. } else {
  1514. /* new node */
  1515. newprop->next = NULL;
  1516. *next = newprop;
  1517. }
  1518. return 0;
  1519. }
/*
 * of_update_property - Update a property in a node, if the property does
 * not exist, add it.
 *
 * Note that we don't actually remove it, since we have given out
 * who-knows-how-many pointers to the data using get-property.
 * Instead we just move the property to the "dead properties" list,
 * and add the new property to the property list
 *
 * Returns -EINVAL if @newprop has no name. On success the sysfs entry
 * is refreshed and an OF_RECONFIG_UPDATE_PROPERTY notification is
 * raised after of_mutex is dropped.
 */
int of_update_property(struct device_node *np, struct property *newprop)
{
	struct property *oldprop;
	unsigned long flags;
	int rc;

	if (!newprop->name)
		return -EINVAL;

	mutex_lock(&of_mutex);

	raw_spin_lock_irqsave(&devtree_lock, flags);
	rc = __of_update_property(np, newprop, &oldprop);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	if (!rc)
		__of_update_property_sysfs(np, newprop, oldprop);

	mutex_unlock(&of_mutex);

	if (!rc)
		of_property_notify(OF_RECONFIG_UPDATE_PROPERTY, np, newprop, oldprop);

	return rc;
}
  1547. static void of_alias_add(struct alias_prop *ap, struct device_node *np,
  1548. int id, const char *stem, int stem_len)
  1549. {
  1550. ap->np = np;
  1551. ap->id = id;
  1552. strncpy(ap->stem, stem, stem_len);
  1553. ap->stem[stem_len] = 0;
  1554. list_add_tail(&ap->link, &aliases_lookup);
  1555. pr_debug("adding DT alias:%s: stem=%s id=%i node=%pOF\n",
  1556. ap->alias, ap->stem, ap->id, np);
  1557. }
  1558. /**
  1559. * of_alias_scan - Scan all properties of the 'aliases' node
  1560. *
  1561. * The function scans all the properties of the 'aliases' node and populates
  1562. * the global lookup table with the properties. It returns the
  1563. * number of alias properties found, or an error code in case of failure.
  1564. *
  1565. * @dt_alloc: An allocator that provides a virtual address to memory
  1566. * for storing the resulting tree
  1567. */
  1568. void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
  1569. {
  1570. struct property *pp;
  1571. of_aliases = of_find_node_by_path("/aliases");
  1572. of_chosen = of_find_node_by_path("/chosen");
  1573. if (of_chosen == NULL)
  1574. of_chosen = of_find_node_by_path("/chosen@0");
  1575. if (of_chosen) {
  1576. /* linux,stdout-path and /aliases/stdout are for legacy compatibility */
  1577. const char *name = NULL;
  1578. if (of_property_read_string(of_chosen, "stdout-path", &name))
  1579. of_property_read_string(of_chosen, "linux,stdout-path",
  1580. &name);
  1581. if (IS_ENABLED(CONFIG_PPC) && !name)
  1582. of_property_read_string(of_aliases, "stdout", &name);
  1583. if (name)
  1584. of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
  1585. }
  1586. if (!of_aliases)
  1587. return;
  1588. for_each_property_of_node(of_aliases, pp) {
  1589. const char *start = pp->name;
  1590. const char *end = start + strlen(start);
  1591. struct device_node *np;
  1592. struct alias_prop *ap;
  1593. int id, len;
  1594. /* Skip those we do not want to proceed */
  1595. if (!strcmp(pp->name, "name") ||
  1596. !strcmp(pp->name, "phandle") ||
  1597. !strcmp(pp->name, "linux,phandle"))
  1598. continue;
  1599. np = of_find_node_by_path(pp->value);
  1600. if (!np)
  1601. continue;
  1602. /* walk the alias backwards to extract the id and work out
  1603. * the 'stem' string */
  1604. while (isdigit(*(end-1)) && end > start)
  1605. end--;
  1606. len = end - start;
  1607. if (kstrtoint(end, 10, &id) < 0)
  1608. continue;
  1609. /* Allocate an alias_prop with enough space for the stem */
  1610. ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap));
  1611. if (!ap)
  1612. continue;
  1613. memset(ap, 0, sizeof(*ap) + len + 1);
  1614. ap->alias = start;
  1615. of_alias_add(ap, np, id, start, len);
  1616. }
  1617. }
  1618. /**
  1619. * of_alias_get_id - Get alias id for the given device_node
  1620. * @np: Pointer to the given device_node
  1621. * @stem: Alias stem of the given device_node
  1622. *
  1623. * The function travels the lookup table to get the alias id for the given
  1624. * device_node and alias stem. It returns the alias id if found.
  1625. */
  1626. int of_alias_get_id(struct device_node *np, const char *stem)
  1627. {
  1628. struct alias_prop *app;
  1629. int id = -ENODEV;
  1630. mutex_lock(&of_mutex);
  1631. list_for_each_entry(app, &aliases_lookup, link) {
  1632. if (strcmp(app->stem, stem) != 0)
  1633. continue;
  1634. if (np == app->np) {
  1635. id = app->id;
  1636. break;
  1637. }
  1638. }
  1639. mutex_unlock(&of_mutex);
  1640. return id;
  1641. }
  1642. EXPORT_SYMBOL_GPL(of_alias_get_id);
  1643. /**
  1644. * of_alias_get_highest_id - Get highest alias id for the given stem
  1645. * @stem: Alias stem to be examined
  1646. *
  1647. * The function travels the lookup table to get the highest alias id for the
  1648. * given alias stem. It returns the alias id if found.
  1649. */
  1650. int of_alias_get_highest_id(const char *stem)
  1651. {
  1652. struct alias_prop *app;
  1653. int id = -ENODEV;
  1654. mutex_lock(&of_mutex);
  1655. list_for_each_entry(app, &aliases_lookup, link) {
  1656. if (strcmp(app->stem, stem) != 0)
  1657. continue;
  1658. if (app->id > id)
  1659. id = app->id;
  1660. }
  1661. mutex_unlock(&of_mutex);
  1662. return id;
  1663. }
  1664. EXPORT_SYMBOL_GPL(of_alias_get_highest_id);
/**
 * of_console_check() - Test and setup console for DT setup
 * @dn: Pointer to device node
 * @name: Name to use for preferred console without index. ex. "ttyS"
 * @index: Index to use for preferred console.
 *
 * Check if the given device node matches the stdout-path property in the
 * /chosen node. If it does then register it as the preferred console and return
 * TRUE. Otherwise return FALSE.
 */
  1675. bool of_console_check(struct device_node *dn, char *name, int index)
  1676. {
  1677. if (!dn || dn != of_stdout || console_set_on_cmdline)
  1678. return false;
  1679. /*
  1680. * XXX: cast `options' to char pointer to suppress complication
  1681. * warnings: printk, UART and console drivers expect char pointer.
  1682. */
  1683. return !add_preferred_console(name, index, (char *)of_stdout_options);
  1684. }
  1685. EXPORT_SYMBOL_GPL(of_console_check);
  1686. /**
  1687. * of_find_next_cache_node - Find a node's subsidiary cache
  1688. * @np: node of type "cpu" or "cache"
  1689. *
  1690. * Returns a node pointer with refcount incremented, use
  1691. * of_node_put() on it when done. Caller should hold a reference
  1692. * to np.
  1693. */
  1694. struct device_node *of_find_next_cache_node(const struct device_node *np)
  1695. {
  1696. struct device_node *child, *cache_node;
  1697. cache_node = of_parse_phandle(np, "l2-cache", 0);
  1698. if (!cache_node)
  1699. cache_node = of_parse_phandle(np, "next-level-cache", 0);
  1700. if (cache_node)
  1701. return cache_node;
  1702. /* OF on pmac has nodes instead of properties named "l2-cache"
  1703. * beneath CPU nodes.
  1704. */
  1705. if (!strcmp(np->type, "cpu"))
  1706. for_each_child_of_node(np, child)
  1707. if (!strcmp(child->type, "cache"))
  1708. return child;
  1709. return NULL;
  1710. }
/**
 * of_find_last_cache_level - Find the level at which the last cache is
 * present for the given logical cpu
 *
 * @cpu: cpu number(logical index) for which the last cache level is needed
 *
 * Returns the level at which the last cache is present. It is exactly
 * same as the total number of cache levels for the given logical cpu.
 */
  1720. int of_find_last_cache_level(unsigned int cpu)
  1721. {
  1722. u32 cache_level = 0;
  1723. struct device_node *prev = NULL, *np = of_cpu_device_node_get(cpu);
  1724. while (np) {
  1725. prev = np;
  1726. of_node_put(np);
  1727. np = of_find_next_cache_node(np);
  1728. }
  1729. of_property_read_u32(prev, "cache-level", &cache_level);
  1730. return cache_level;
  1731. }