
// SPDX-License-Identifier: GPL-2.0
/* mdesc.c: Sun4V machine description handling.
 *
 * Copyright (C) 2007, 2008 David S. Miller <[email protected]>
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/log2.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/refcount.h>

#include <asm/cpudata.h>
#include <asm/hypervisor.h>
#include <asm/mdesc.h>
#include <asm/prom.h>
#include <linux/uaccess.h>
#include <asm/oplib.h>
#include <asm/smp.h>
#include <asm/adi.h>

/* Unlike the OBP device tree, the machine description is a full-on
 * DAG.  An arbitrary number of ARCs are possible from one
 * node to other nodes and thus we can't use the OBP device_node
 * data structure to represent these nodes inside of the kernel.
 *
 * Actually, it isn't even a DAG, because there are back pointers
 * which create cycles in the graph.
 *
 * mdesc_hdr and mdesc_elem describe the layout of the data structure
 * we get from the Hypervisor.
 */
struct mdesc_hdr {
	u32	version;	/* Transport version */
	u32	node_sz;	/* node block size */
	u32	name_sz;	/* name block size */
	u32	data_sz;	/* data block size */
	char	data[];
} __attribute__((aligned(16)));

struct mdesc_elem {
	u8	tag;
#define MD_LIST_END	0x00
#define MD_NODE		0x4e
#define MD_NODE_END	0x45
#define MD_NOOP		0x20
#define MD_PROP_ARC	0x61
#define MD_PROP_VAL	0x76
#define MD_PROP_STR	0x73
#define MD_PROP_DATA	0x64
	u8	name_len;
	u16	resv;
	u32	name_offset;
	union {
		struct {
			u32	data_len;
			u32	data_offset;
		} data;
		u64	val;
	} d;
};

struct mdesc_mem_ops {
	struct mdesc_handle *(*alloc)(unsigned int mdesc_size);
	void (*free)(struct mdesc_handle *handle);
};

struct mdesc_handle {
	struct list_head	list;
	struct mdesc_mem_ops	*mops;
	void			*self_base;
	refcount_t		refcnt;
	unsigned int		handle_size;
	struct mdesc_hdr	mdesc;
};

typedef int (*mdesc_node_info_get_f)(struct mdesc_handle *, u64,
				     union md_node_info *);
typedef void (*mdesc_node_info_rel_f)(union md_node_info *);
typedef bool (*mdesc_node_match_f)(union md_node_info *, union md_node_info *);

struct md_node_ops {
	char			*name;
	mdesc_node_info_get_f	get_info;
	mdesc_node_info_rel_f	rel_info;
	mdesc_node_match_f	node_match;
};

static int get_vdev_port_node_info(struct mdesc_handle *md, u64 node,
				   union md_node_info *node_info);
static void rel_vdev_port_node_info(union md_node_info *node_info);
static bool vdev_port_node_match(union md_node_info *a_node_info,
				 union md_node_info *b_node_info);

static int get_ds_port_node_info(struct mdesc_handle *md, u64 node,
				 union md_node_info *node_info);
static void rel_ds_port_node_info(union md_node_info *node_info);
static bool ds_port_node_match(union md_node_info *a_node_info,
			       union md_node_info *b_node_info);

/* supported node types which can be registered */
static struct md_node_ops md_node_ops_table[] = {
	{"virtual-device-port", get_vdev_port_node_info,
	 rel_vdev_port_node_info, vdev_port_node_match},
	{"domain-services-port", get_ds_port_node_info,
	 rel_ds_port_node_info, ds_port_node_match},
	{NULL, NULL, NULL, NULL}
};
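
/* Resolve the get_info/rel_info/node_match callbacks registered in
 * md_node_ops_table for the given node name.  Any requested output
 * that has no table entry is set to NULL.
 */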
static void mdesc_get_node_ops(const char *node_name,
			       mdesc_node_info_get_f *get_info_f,
			       mdesc_node_info_rel_f *rel_info_f,
			       mdesc_node_match_f *match_f)
{
	int i;

	if (get_info_f)
		*get_info_f = NULL;

	if (rel_info_f)
		*rel_info_f = NULL;

	if (match_f)
		*match_f = NULL;

	if (!node_name)
		return;

	for (i = 0; md_node_ops_table[i].name != NULL; i++) {
		if (strcmp(md_node_ops_table[i].name, node_name) == 0) {
			if (get_info_f)
				*get_info_f = md_node_ops_table[i].get_info;

			if (rel_info_f)
				*rel_info_f = md_node_ops_table[i].rel_info;

			if (match_f)
				*match_f = md_node_ops_table[i].node_match;

			break;
		}
	}
}

static void mdesc_handle_init(struct mdesc_handle *hp,
			      unsigned int handle_size,
			      void *base)
{
	BUG_ON(((unsigned long)&hp->mdesc) & (16UL - 1));

	memset(hp, 0, handle_size);
	INIT_LIST_HEAD(&hp->list);
	hp->self_base = base;
	refcount_set(&hp->refcnt, 1);
	hp->handle_size = handle_size;
}
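
/* Early-boot allocator: carve the mdesc_handle out of memblock, since
 * the slab allocator is not yet available when the initial machine
 * description is fetched.  The handle embeds the MD buffer, so only
 * sizeof(handle) - sizeof(hdr) + mdesc_size bytes are needed.
 */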
static struct mdesc_handle * __init mdesc_memblock_alloc(unsigned int mdesc_size)
{
	unsigned int handle_size, alloc_size;
	struct mdesc_handle *hp;
	unsigned long paddr;

	handle_size = (sizeof(struct mdesc_handle) -
		       sizeof(struct mdesc_hdr) +
		       mdesc_size);
	alloc_size = PAGE_ALIGN(handle_size);

	paddr = memblock_phys_alloc(alloc_size, PAGE_SIZE);

	hp = NULL;
	if (paddr) {
		hp = __va(paddr);
		mdesc_handle_init(hp, handle_size, hp);
	}
	return hp;
}

static void __init mdesc_memblock_free(struct mdesc_handle *hp)
{
	unsigned int alloc_size;
	unsigned long start;

	BUG_ON(refcount_read(&hp->refcnt) != 0);
	BUG_ON(!list_empty(&hp->list));

	alloc_size = PAGE_ALIGN(hp->handle_size);
	start = __pa(hp);
	memblock_free_late(start, alloc_size);
}

static struct mdesc_mem_ops memblock_mdesc_ops = {
	.alloc = mdesc_memblock_alloc,
	.free = mdesc_memblock_free,
};
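
/* Runtime allocator used for machine description updates: kmalloc a
 * buffer with 15 bytes of slack so the embedded mdesc_hdr can be
 * placed on the 16-byte boundary that mdesc_handle_init() asserts.
 */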
static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
{
	unsigned int handle_size;
	struct mdesc_handle *hp;
	unsigned long addr;
	void *base;

	handle_size = (sizeof(struct mdesc_handle) -
		       sizeof(struct mdesc_hdr) +
		       mdesc_size);
	base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!base)
		return NULL;

	addr = (unsigned long)base;
	addr = (addr + 15UL) & ~15UL;
	hp = (struct mdesc_handle *) addr;

	mdesc_handle_init(hp, handle_size, base);
	return hp;
}

static void mdesc_kfree(struct mdesc_handle *hp)
{
	BUG_ON(refcount_read(&hp->refcnt) != 0);
	BUG_ON(!list_empty(&hp->list));

	kfree(hp->self_base);
}

static struct mdesc_mem_ops kmalloc_mdesc_memops = {
	.alloc = mdesc_kmalloc,
	.free = mdesc_kfree,
};

static struct mdesc_handle *mdesc_alloc(unsigned int mdesc_size,
					struct mdesc_mem_ops *mops)
{
	struct mdesc_handle *hp = mops->alloc(mdesc_size);

	if (hp)
		hp->mops = mops;

	return hp;
}

static void mdesc_free(struct mdesc_handle *hp)
{
	hp->mops->free(hp);
}

static struct mdesc_handle *cur_mdesc;
static LIST_HEAD(mdesc_zombie_list);
static DEFINE_SPINLOCK(mdesc_lock);
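
/* Take a reference on the current machine description.  Callers must
 * drop it with mdesc_release() when they are done with the handle.
 */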
struct mdesc_handle *mdesc_grab(void)
{
	struct mdesc_handle *hp;
	unsigned long flags;

	spin_lock_irqsave(&mdesc_lock, flags);
	hp = cur_mdesc;
	if (hp)
		refcount_inc(&hp->refcnt);
	spin_unlock_irqrestore(&mdesc_lock, flags);

	return hp;
}
EXPORT_SYMBOL(mdesc_grab);

void mdesc_release(struct mdesc_handle *hp)
{
	unsigned long flags;

	spin_lock_irqsave(&mdesc_lock, flags);
	if (refcount_dec_and_test(&hp->refcnt)) {
		list_del_init(&hp->list);
		hp->mops->free(hp);
	}
	spin_unlock_irqrestore(&mdesc_lock, flags);
}
EXPORT_SYMBOL(mdesc_release);

static DEFINE_MUTEX(mdesc_mutex);
static struct mdesc_notifier_client *client_list;

void mdesc_register_notifier(struct mdesc_notifier_client *client)
{
	bool supported = false;
	u64 node;
	int i;

	mutex_lock(&mdesc_mutex);

	/* check to see if the node is supported for registration */
	for (i = 0; md_node_ops_table[i].name != NULL; i++) {
		if (strcmp(md_node_ops_table[i].name, client->node_name) == 0) {
			supported = true;
			break;
		}
	}

	if (!supported) {
		pr_err("MD: %s node not supported\n", client->node_name);
		mutex_unlock(&mdesc_mutex);
		return;
	}

	client->next = client_list;
	client_list = client;

	mdesc_for_each_node_by_name(cur_mdesc, node, client->node_name)
		client->add(cur_mdesc, node, client->node_name);

	mutex_unlock(&mdesc_mutex);
}

static const u64 *parent_cfg_handle(struct mdesc_handle *hp, u64 node)
{
	const u64 *id;
	u64 a;

	id = NULL;
	mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
		u64 target;

		target = mdesc_arc_target(hp, a);
		id = mdesc_get_property(hp, target,
					"cfg-handle", NULL);
		if (id)
			break;
	}

	return id;
}

static int get_vdev_port_node_info(struct mdesc_handle *md, u64 node,
				   union md_node_info *node_info)
{
	const u64 *parent_cfg_hdlp;
	const char *name;
	const u64 *idp;

	/*
	 * Virtual device nodes are distinguished by:
	 * 1. "id" property
	 * 2. "name" property
	 * 3. parent node "cfg-handle" property
	 */
	idp = mdesc_get_property(md, node, "id", NULL);
	name = mdesc_get_property(md, node, "name", NULL);
	parent_cfg_hdlp = parent_cfg_handle(md, node);

	if (!idp || !name || !parent_cfg_hdlp)
		return -1;

	node_info->vdev_port.id = *idp;
	node_info->vdev_port.name = kstrdup_const(name, GFP_KERNEL);
	if (!node_info->vdev_port.name)
		return -1;
	node_info->vdev_port.parent_cfg_hdl = *parent_cfg_hdlp;

	return 0;
}

static void rel_vdev_port_node_info(union md_node_info *node_info)
{
	if (node_info && node_info->vdev_port.name) {
		kfree_const(node_info->vdev_port.name);
		node_info->vdev_port.name = NULL;
	}
}

static bool vdev_port_node_match(union md_node_info *a_node_info,
				 union md_node_info *b_node_info)
{
	if (a_node_info->vdev_port.id != b_node_info->vdev_port.id)
		return false;

	if (a_node_info->vdev_port.parent_cfg_hdl !=
	    b_node_info->vdev_port.parent_cfg_hdl)
		return false;

	if (strncmp(a_node_info->vdev_port.name,
		    b_node_info->vdev_port.name, MDESC_MAX_STR_LEN) != 0)
		return false;

	return true;
}

static int get_ds_port_node_info(struct mdesc_handle *md, u64 node,
				 union md_node_info *node_info)
{
	const u64 *idp;

	/* DS port nodes use the "id" property to distinguish them */
	idp = mdesc_get_property(md, node, "id", NULL);
	if (!idp)
		return -1;

	node_info->ds_port.id = *idp;

	return 0;
}

static void rel_ds_port_node_info(union md_node_info *node_info)
{
}

static bool ds_port_node_match(union md_node_info *a_node_info,
			       union md_node_info *b_node_info)
{
	if (a_node_info->ds_port.id != b_node_info->ds_port.id)
		return false;

	return true;
}

/* Run 'func' on nodes which are in A but not in B. */
static void invoke_on_missing(const char *name,
			      struct mdesc_handle *a,
			      struct mdesc_handle *b,
			      void (*func)(struct mdesc_handle *, u64,
					   const char *node_name))
{
	mdesc_node_info_get_f get_info_func;
	mdesc_node_info_rel_f rel_info_func;
	mdesc_node_match_f node_match_func;
	union md_node_info a_node_info;
	union md_node_info b_node_info;
	bool found;
	u64 a_node;
	u64 b_node;
	int rv;

	/*
	 * Find the get_info, rel_info and node_match ops for the given
	 * node name
	 */
	mdesc_get_node_ops(name, &get_info_func, &rel_info_func,
			   &node_match_func);

	/* If we didn't find a match, the node type is not supported */
	if (!get_info_func || !rel_info_func || !node_match_func) {
		pr_err("MD: %s node type is not supported\n", name);
		return;
	}

	mdesc_for_each_node_by_name(a, a_node, name) {
		found = false;

		rv = get_info_func(a, a_node, &a_node_info);
		if (rv != 0) {
			pr_err("MD: Cannot find 1 or more required match properties for %s node.\n",
			       name);
			continue;
		}

		/* Check each node in B for node matching a_node */
		mdesc_for_each_node_by_name(b, b_node, name) {
			rv = get_info_func(b, b_node, &b_node_info);
			if (rv != 0)
				continue;

			if (node_match_func(&a_node_info, &b_node_info)) {
				found = true;
				rel_info_func(&b_node_info);
				break;
			}

			rel_info_func(&b_node_info);
		}

		rel_info_func(&a_node_info);

		if (!found)
			func(a, a_node, name);
	}
}

static void notify_one(struct mdesc_notifier_client *p,
		       struct mdesc_handle *old_hp,
		       struct mdesc_handle *new_hp)
{
	invoke_on_missing(p->node_name, old_hp, new_hp, p->remove);
	invoke_on_missing(p->node_name, new_hp, old_hp, p->add);
}

static void mdesc_notify_clients(struct mdesc_handle *old_hp,
				 struct mdesc_handle *new_hp)
{
	struct mdesc_notifier_client *p = client_list;

	while (p) {
		notify_one(p, old_hp, new_hp);
		p = p->next;
	}
}
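
/* Re-read the machine description from the hypervisor: allocate a new
 * handle, swap it in as cur_mdesc, notify registered clients of nodes
 * that appeared or disappeared, then free the old handle (or park it
 * on the zombie list while references to it remain).
 */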
void mdesc_update(void)
{
	unsigned long len, real_len, status;
	struct mdesc_handle *hp, *orig_hp;
	unsigned long flags;

	mutex_lock(&mdesc_mutex);

	(void) sun4v_mach_desc(0UL, 0UL, &len);

	hp = mdesc_alloc(len, &kmalloc_mdesc_memops);
	if (!hp) {
		printk(KERN_ERR "MD: mdesc alloc fails\n");
		goto out;
	}

	status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len);
	if (status != HV_EOK || real_len > len) {
		printk(KERN_ERR "MD: mdesc reread fails with %lu\n",
		       status);
		refcount_dec(&hp->refcnt);
		mdesc_free(hp);
		goto out;
	}

	spin_lock_irqsave(&mdesc_lock, flags);
	orig_hp = cur_mdesc;
	cur_mdesc = hp;
	spin_unlock_irqrestore(&mdesc_lock, flags);

	mdesc_notify_clients(orig_hp, hp);

	spin_lock_irqsave(&mdesc_lock, flags);
	if (refcount_dec_and_test(&orig_hp->refcnt))
		mdesc_free(orig_hp);
	else
		list_add(&orig_hp->list, &mdesc_zombie_list);
	spin_unlock_irqrestore(&mdesc_lock, flags);

out:
	mutex_unlock(&mdesc_mutex);
}

u64 mdesc_get_node(struct mdesc_handle *hp, const char *node_name,
		   union md_node_info *node_info)
{
	mdesc_node_info_get_f get_info_func;
	mdesc_node_info_rel_f rel_info_func;
	mdesc_node_match_f node_match_func;
	union md_node_info hp_node_info;
	u64 hp_node;
	int rv;

	if (hp == NULL || node_name == NULL || node_info == NULL)
		return MDESC_NODE_NULL;

	/* Find the ops for the given node name */
	mdesc_get_node_ops(node_name, &get_info_func, &rel_info_func,
			   &node_match_func);

	/* If we didn't find ops for the given node name, it is not supported */
	if (!get_info_func || !rel_info_func || !node_match_func) {
		pr_err("MD: %s node is not supported\n", node_name);
		return -EINVAL;
	}

	mdesc_for_each_node_by_name(hp, hp_node, node_name) {
		rv = get_info_func(hp, hp_node, &hp_node_info);
		if (rv != 0)
			continue;

		if (node_match_func(node_info, &hp_node_info))
			break;

		rel_info_func(&hp_node_info);
	}

	rel_info_func(&hp_node_info);

	return hp_node;
}
EXPORT_SYMBOL(mdesc_get_node);

int mdesc_get_node_info(struct mdesc_handle *hp, u64 node,
			const char *node_name, union md_node_info *node_info)
{
	mdesc_node_info_get_f get_info_func;
	int rv;

	if (hp == NULL || node == MDESC_NODE_NULL ||
	    node_name == NULL || node_info == NULL)
		return -EINVAL;

	/* Find the get_info op for the given node name */
	mdesc_get_node_ops(node_name, &get_info_func, NULL, NULL);

	/* If we didn't find a get_info_func, the node name is not supported */
	if (get_info_func == NULL) {
		pr_err("MD: %s node is not supported\n", node_name);
		return -EINVAL;
	}

	rv = get_info_func(hp, node, node_info);
	if (rv != 0) {
		pr_err("MD: Cannot find 1 or more required match properties for %s node.\n",
		       node_name);
		return -1;
	}

	return 0;
}
EXPORT_SYMBOL(mdesc_get_node_info);

static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
{
	return (struct mdesc_elem *) mdesc->data;
}

static void *name_block(struct mdesc_hdr *mdesc)
{
	return ((void *) node_block(mdesc)) + mdesc->node_sz;
}

static void *data_block(struct mdesc_hdr *mdesc)
{
	return ((void *) name_block(mdesc)) + mdesc->name_sz;
}
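
/* Walk the node block looking for the next node named 'name' at or
 * after 'from_node'.  Each MD_NODE element stores the index of its
 * successor node in d.val, which is how the iteration advances.
 */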
u64 mdesc_node_by_name(struct mdesc_handle *hp,
		       u64 from_node, const char *name)
{
	struct mdesc_elem *ep = node_block(&hp->mdesc);
	const char *names = name_block(&hp->mdesc);
	u64 last_node = hp->mdesc.node_sz / 16;
	u64 ret;

	if (from_node == MDESC_NODE_NULL) {
		ret = from_node = 0;
	} else if (from_node >= last_node) {
		return MDESC_NODE_NULL;
	} else {
		ret = ep[from_node].d.val;
	}

	while (ret < last_node) {
		if (ep[ret].tag != MD_NODE)
			return MDESC_NODE_NULL;
		if (!strcmp(names + ep[ret].name_offset, name))
			break;
		ret = ep[ret].d.val;
	}
	if (ret >= last_node)
		ret = MDESC_NODE_NULL;

	return ret;
}
EXPORT_SYMBOL(mdesc_node_by_name);
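
/* Scan the property elements of 'node' up to MD_NODE_END, returning a
 * pointer to the value of the property called 'name' (and optionally
 * its length via 'lenp'), or NULL if the node has no such property.
 */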
const void *mdesc_get_property(struct mdesc_handle *hp, u64 node,
			       const char *name, int *lenp)
{
	const char *names = name_block(&hp->mdesc);
	u64 last_node = hp->mdesc.node_sz / 16;
	void *data = data_block(&hp->mdesc);
	struct mdesc_elem *ep;

	if (node == MDESC_NODE_NULL || node >= last_node)
		return NULL;

	ep = node_block(&hp->mdesc) + node;
	ep++;
	for (; ep->tag != MD_NODE_END; ep++) {
		void *val = NULL;
		int len = 0;

		switch (ep->tag) {
		case MD_PROP_VAL:
			val = &ep->d.val;
			len = 8;
			break;

		case MD_PROP_STR:
		case MD_PROP_DATA:
			val = data + ep->d.data.data_offset;
			len = ep->d.data.data_len;
			break;

		default:
			break;
		}
		if (!val)
			continue;

		if (!strcmp(names + ep->name_offset, name)) {
			if (lenp)
				*lenp = len;

			return val;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(mdesc_get_property);
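
/* Arc (edge) traversal helpers: mdesc_next_arc() finds the next
 * MD_PROP_ARC element of the given type within a node, and
 * mdesc_arc_target() returns the node index that an arc points at.
 */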
u64 mdesc_next_arc(struct mdesc_handle *hp, u64 from, const char *arc_type)
{
	struct mdesc_elem *ep, *base = node_block(&hp->mdesc);
	const char *names = name_block(&hp->mdesc);
	u64 last_node = hp->mdesc.node_sz / 16;

	if (from == MDESC_NODE_NULL || from >= last_node)
		return MDESC_NODE_NULL;

	ep = base + from;

	ep++;
	for (; ep->tag != MD_NODE_END; ep++) {
		if (ep->tag != MD_PROP_ARC)
			continue;

		if (strcmp(names + ep->name_offset, arc_type))
			continue;

		return ep - base;
	}

	return MDESC_NODE_NULL;
}
EXPORT_SYMBOL(mdesc_next_arc);

u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc)
{
	struct mdesc_elem *ep, *base = node_block(&hp->mdesc);

	ep = base + arc;

	return ep->d.val;
}
EXPORT_SYMBOL(mdesc_arc_target);

const char *mdesc_node_name(struct mdesc_handle *hp, u64 node)
{
	struct mdesc_elem *ep, *base = node_block(&hp->mdesc);
	const char *names = name_block(&hp->mdesc);
	u64 last_node = hp->mdesc.node_sz / 16;

	if (node == MDESC_NODE_NULL || node >= last_node)
		return NULL;

	ep = base + node;
	if (ep->tag != MD_NODE)
		return NULL;

	return names + ep->name_offset;
}
EXPORT_SYMBOL(mdesc_node_name);

static u64 max_cpus = 64;

static void __init report_platform_properties(void)
{
	struct mdesc_handle *hp = mdesc_grab();
	u64 pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
	const char *s;
	const u64 *v;

	if (pn == MDESC_NODE_NULL) {
		prom_printf("No platform node in machine-description.\n");
		prom_halt();
	}

	s = mdesc_get_property(hp, pn, "banner-name", NULL);
	printk("PLATFORM: banner-name [%s]\n", s);
	s = mdesc_get_property(hp, pn, "name", NULL);
	printk("PLATFORM: name [%s]\n", s);

	v = mdesc_get_property(hp, pn, "hostid", NULL);
	if (v)
		printk("PLATFORM: hostid [%08llx]\n", *v);
	v = mdesc_get_property(hp, pn, "serial#", NULL);
	if (v)
		printk("PLATFORM: serial# [%08llx]\n", *v);
	v = mdesc_get_property(hp, pn, "stick-frequency", NULL);
	printk("PLATFORM: stick-frequency [%08llx]\n", *v);
	v = mdesc_get_property(hp, pn, "mac-address", NULL);
	if (v)
		printk("PLATFORM: mac-address [%llx]\n", *v);
	v = mdesc_get_property(hp, pn, "watchdog-resolution", NULL);
	if (v)
		printk("PLATFORM: watchdog-resolution [%llu ms]\n", *v);
	v = mdesc_get_property(hp, pn, "watchdog-max-timeout", NULL);
	if (v)
		printk("PLATFORM: watchdog-max-timeout [%llu ms]\n", *v);
	v = mdesc_get_property(hp, pn, "max-cpus", NULL);
	if (v) {
		max_cpus = *v;
		printk("PLATFORM: max-cpus [%llu]\n", max_cpus);
	}

#ifdef CONFIG_SMP
	{
		int max_cpu, i;

		if (v) {
			max_cpu = *v;
			if (max_cpu > NR_CPUS)
				max_cpu = NR_CPUS;
		} else {
			max_cpu = NR_CPUS;
		}
		for (i = 0; i < max_cpu; i++)
			set_cpu_possible(i, true);
	}
#endif

	mdesc_release(hp);
}

static void fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_handle *hp, u64 mp)
{
	const u64 *level = mdesc_get_property(hp, mp, "level", NULL);
	const u64 *size = mdesc_get_property(hp, mp, "size", NULL);
	const u64 *line_size = mdesc_get_property(hp, mp, "line-size", NULL);
	const char *type;
	int type_len;

	type = mdesc_get_property(hp, mp, "type", &type_len);

	switch (*level) {
	case 1:
		if (of_find_in_proplist(type, "instn", type_len)) {
			c->icache_size = *size;
			c->icache_line_size = *line_size;
		} else if (of_find_in_proplist(type, "data", type_len)) {
			c->dcache_size = *size;
			c->dcache_line_size = *line_size;
		}
		break;

	case 2:
		c->ecache_size = *size;
		c->ecache_line_size = *line_size;
		break;

	default:
		break;
	}

	if (*level == 1) {
		u64 a;

		mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
			u64 target = mdesc_arc_target(hp, a);
			const char *name = mdesc_node_name(hp, target);

			if (!strcmp(name, "cache"))
				fill_in_one_cache(c, hp, target);
		}
	}
}
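
/* Recursively follow back-arcs from 'node', invoking 'func' on every
 * reachable node whose name matches 'srch_val'.  The 'depth' argument
 * bounds the recursion, since back-arcs can form cycles in the graph.
 */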
static void find_back_node_value(struct mdesc_handle *hp, u64 node,
				 char *srch_val,
				 void (*func)(struct mdesc_handle *, u64, int),
				 u64 val, int depth)
{
	u64 arc;

	/* Since we have an estimate of recursion depth, do a sanity check. */
	if (depth == 0)
		return;

	mdesc_for_each_arc(arc, hp, node, MDESC_ARC_TYPE_BACK) {
		u64 n = mdesc_arc_target(hp, arc);
		const char *name = mdesc_node_name(hp, n);

		if (!strcmp(srch_val, name))
			(*func)(hp, n, val);

		find_back_node_value(hp, n, srch_val, func, val, depth-1);
	}
}

static void __mark_core_id(struct mdesc_handle *hp, u64 node,
			   int core_id)
{
	const u64 *id = mdesc_get_property(hp, node, "id", NULL);

	if (*id < num_possible_cpus())
		cpu_data(*id).core_id = core_id;
}

static void __mark_max_cache_id(struct mdesc_handle *hp, u64 node,
				int max_cache_id)
{
	const u64 *id = mdesc_get_property(hp, node, "id", NULL);

	if (*id < num_possible_cpus()) {
		cpu_data(*id).max_cache_id = max_cache_id;

		/**
		 * On systems without explicit socket descriptions socket
		 * is max_cache_id
		 */
		cpu_data(*id).sock_id = max_cache_id;
	}
}

static void mark_core_ids(struct mdesc_handle *hp, u64 mp,
			  int core_id)
{
	find_back_node_value(hp, mp, "cpu", __mark_core_id, core_id, 10);
}

static void mark_max_cache_ids(struct mdesc_handle *hp, u64 mp,
			       int max_cache_id)
{
	find_back_node_value(hp, mp, "cpu", __mark_max_cache_id,
			     max_cache_id, 10);
}

static void set_core_ids(struct mdesc_handle *hp)
{
	int idx;
	u64 mp;

	idx = 1;

	/* Identify unique cores by looking for cpus backpointed to by
	 * level 1 instruction caches.
	 */
	mdesc_for_each_node_by_name(hp, mp, "cache") {
		const u64 *level;
		const char *type;
		int len;

		level = mdesc_get_property(hp, mp, "level", NULL);
		if (*level != 1)
			continue;

		type = mdesc_get_property(hp, mp, "type", &len);
		if (!of_find_in_proplist(type, "instn", len))
			continue;

		mark_core_ids(hp, mp, idx);
		idx++;
	}
}

static int set_max_cache_ids_by_cache(struct mdesc_handle *hp, int level)
{
	u64 mp;
	int idx = 1;
	int fnd = 0;

	/**
	 * Identify unique highest level of shared cache by looking for cpus
	 * backpointed to by shared level N caches.
	 */
	mdesc_for_each_node_by_name(hp, mp, "cache") {
		const u64 *cur_lvl;

		cur_lvl = mdesc_get_property(hp, mp, "level", NULL);
		if (*cur_lvl != level)
			continue;
		mark_max_cache_ids(hp, mp, idx);
		idx++;
		fnd = 1;
	}
	return fnd;
}

static void set_sock_ids_by_socket(struct mdesc_handle *hp, u64 mp)
{
	int idx = 1;

	mdesc_for_each_node_by_name(hp, mp, "socket") {
		u64 a;

		mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
			u64 t = mdesc_arc_target(hp, a);
			const char *name;
			const u64 *id;

			name = mdesc_node_name(hp, t);
			if (strcmp(name, "cpu"))
				continue;

			id = mdesc_get_property(hp, t, "id", NULL);
			if (*id < num_possible_cpus())
				cpu_data(*id).sock_id = idx;
		}
		idx++;
	}
}

static void set_sock_ids(struct mdesc_handle *hp)
{
	u64 mp;

	/**
	 * Find the highest level of shared cache which pre-T7 is also
	 * the socket.
	 */
	if (!set_max_cache_ids_by_cache(hp, 3))
		set_max_cache_ids_by_cache(hp, 2);

	/* If machine description exposes sockets data use it. */
	mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets");
	if (mp != MDESC_NODE_NULL)
		set_sock_ids_by_socket(hp, mp);
}

static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
{
	u64 a;

	mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
		u64 t = mdesc_arc_target(hp, a);
		const char *name;
		const u64 *id;

		name = mdesc_node_name(hp, t);
		if (strcmp(name, "cpu"))
			continue;

		id = mdesc_get_property(hp, t, "id", NULL);
		if (*id < NR_CPUS)
			cpu_data(*id).proc_id = proc_id;
	}
}

static void __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name)
{
	int idx;
	u64 mp;

	idx = 0;
	mdesc_for_each_node_by_name(hp, mp, exec_unit_name) {
		const char *type;
		int len;

		type = mdesc_get_property(hp, mp, "type", &len);
		if (!of_find_in_proplist(type, "int", len) &&
		    !of_find_in_proplist(type, "integer", len))
			continue;

		mark_proc_ids(hp, mp, idx);
		idx++;
	}
}

static void set_proc_ids(struct mdesc_handle *hp)
{
	__set_proc_ids(hp, "exec_unit");
	__set_proc_ids(hp, "exec-unit");
}
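
/* Compute a mondo queue mask from a "#bits" property: the queue holds
 * (1 << val) * 64 entries and the mask is entries - 1.  The value is
 * clamped to 'max'; missing or out-of-range values fall back to 'def'.
 */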
static void get_one_mondo_bits(const u64 *p, unsigned int *mask,
			       unsigned long def, unsigned long max)
{
	u64 val;

	if (!p)
		goto use_default;
	val = *p;

	if (!val || val >= 64)
		goto use_default;

	if (val > max)
		val = max;

	*mask = ((1U << val) * 64U) - 1U;
	return;

use_default:
	*mask = ((1U << def) * 64U) - 1U;
}

static void get_mondo_data(struct mdesc_handle *hp, u64 mp,
			   struct trap_per_cpu *tb)
{
	static int printed;
	const u64 *val;

	val = mdesc_get_property(hp, mp, "q-cpu-mondo-#bits", NULL);
	get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7, ilog2(max_cpus * 2));

	val = mdesc_get_property(hp, mp, "q-dev-mondo-#bits", NULL);
	get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7, 8);

	val = mdesc_get_property(hp, mp, "q-resumable-#bits", NULL);
	get_one_mondo_bits(val, &tb->resum_qmask, 6, 7);

	val = mdesc_get_property(hp, mp, "q-nonresumable-#bits", NULL);
	get_one_mondo_bits(val, &tb->nonresum_qmask, 2, 2);

	if (!printed++) {
		pr_info("SUN4V: Mondo queue sizes "
			"[cpu(%u) dev(%u) r(%u) nr(%u)]\n",
			tb->cpu_mondo_qmask + 1,
			tb->dev_mondo_qmask + 1,
			tb->resum_qmask + 1,
			tb->nonresum_qmask + 1);
	}
}
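
/* Invoke 'func' on every "cpu" node in the machine description
 * (filtered by 'mask' on SMP), stopping early if the callback returns
 * a non-NULL value.
 */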
static void *mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64,
						   int, void *), void *arg, cpumask_t *mask)
{
	struct mdesc_handle *hp = mdesc_grab();
	void *ret = NULL;
	u64 mp;

	mdesc_for_each_node_by_name(hp, mp, "cpu") {
		const u64 *id = mdesc_get_property(hp, mp, "id", NULL);
		int cpuid = *id;

#ifdef CONFIG_SMP
		if (cpuid >= NR_CPUS) {
			printk(KERN_WARNING "Ignoring CPU %d which is "
			       ">= NR_CPUS (%d)\n",
			       cpuid, NR_CPUS);
			continue;
		}
		if (!cpumask_test_cpu(cpuid, mask))
			continue;
#endif

		ret = func(hp, mp, cpuid, arg);
		if (ret)
			goto out;
	}
out:
	mdesc_release(hp);
	return ret;
}

static void *record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid,
			    void *arg)
{
	ncpus_probed++;
#ifdef CONFIG_SMP
	set_cpu_present(cpuid, true);
#endif
	return NULL;
}

void mdesc_populate_present_mask(cpumask_t *mask)
{
	if (tlb_type != hypervisor)
		return;

	ncpus_probed = 0;
	mdesc_iterate_over_cpus(record_one_cpu, NULL, mask);
}

static void * __init check_one_pgsz(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg)
{
	const u64 *pgsz_prop = mdesc_get_property(hp, mp, "mmu-page-size-list", NULL);
	unsigned long *pgsz_mask = arg;
	u64 val;

	val = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
	       HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);
	if (pgsz_prop)
		val = *pgsz_prop;

	if (!*pgsz_mask)
		*pgsz_mask = val;
	else
		*pgsz_mask &= val;
	return NULL;
}

void __init mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask)
{
	*pgsz_mask = 0;
	mdesc_iterate_over_cpus(check_one_pgsz, pgsz_mask, mask);
}
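
/* Per-cpu probe callback: record the clock frequency, mondo queue
 * sizes and cache geometry for one cpu node, following forward arcs
 * (directly and one level down) to find the attached "cache" nodes.
 */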
static void *fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid,
			     void *arg)
{
	const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL);
	struct trap_per_cpu *tb;
	cpuinfo_sparc *c;
	u64 a;

#ifndef CONFIG_SMP
	/* On uniprocessor we only want the values for the
	 * real physical cpu the kernel booted onto, however
	 * cpu_data() only has one entry at index 0.
	 */
	if (cpuid != real_hard_smp_processor_id())
		return NULL;
	cpuid = 0;
#endif

	c = &cpu_data(cpuid);
	c->clock_tick = *cfreq;

	tb = &trap_block[cpuid];
	get_mondo_data(hp, mp, tb);

	mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
		u64 j, t = mdesc_arc_target(hp, a);
		const char *t_name;

		t_name = mdesc_node_name(hp, t);
		if (!strcmp(t_name, "cache")) {
			fill_in_one_cache(c, hp, t);
			continue;
		}

		mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) {
			u64 n = mdesc_arc_target(hp, j);
			const char *n_name;

			n_name = mdesc_node_name(hp, n);
			if (!strcmp(n_name, "cache"))
				fill_in_one_cache(c, hp, n);
		}
	}

	c->core_id = 0;
	c->proc_id = -1;

	return NULL;
}

void mdesc_fill_in_cpu_data(cpumask_t *mask)
{
	struct mdesc_handle *hp;

	mdesc_iterate_over_cpus(fill_in_one_cpu, NULL, mask);

	hp = mdesc_grab();

	set_core_ids(hp);
	set_proc_ids(hp);
	set_sock_ids(hp);

	mdesc_release(hp);

	smp_fill_in_sib_core_maps();
}

/* mdesc_open() - Grab a reference to mdesc_handle when /dev/mdesc is
 * opened. Hold this reference until /dev/mdesc is closed to ensure
 * mdesc data structure is not released underneath us. Store the
 * pointer to mdesc structure in private_data for read and seek to use
 */
static int mdesc_open(struct inode *inode, struct file *file)
{
	struct mdesc_handle *hp = mdesc_grab();

	if (!hp)
		return -ENODEV;

	file->private_data = hp;

	return 0;
}

static ssize_t mdesc_read(struct file *file, char __user *buf,
			  size_t len, loff_t *offp)
{
	struct mdesc_handle *hp = file->private_data;
	unsigned char *mdesc;
	int bytes_left, count = len;

	if (*offp >= hp->handle_size)
		return 0;

	bytes_left = hp->handle_size - *offp;
	if (count > bytes_left)
		count = bytes_left;

	mdesc = (unsigned char *)&hp->mdesc;
	mdesc += *offp;
	if (!copy_to_user(buf, mdesc, count)) {
		*offp += count;
		return count;
	} else {
		return -EFAULT;
	}
}

static loff_t mdesc_llseek(struct file *file, loff_t offset, int whence)
{
	struct mdesc_handle *hp = file->private_data;

	return no_seek_end_llseek_size(file, offset, whence, hp->handle_size);
}

/* mdesc_close() - /dev/mdesc is being closed, release the reference to
 * mdesc structure.
 */
static int mdesc_close(struct inode *inode, struct file *file)
{
	mdesc_release(file->private_data);
	return 0;
}

static const struct file_operations mdesc_fops = {
	.open    = mdesc_open,
	.read    = mdesc_read,
	.llseek  = mdesc_llseek,
	.release = mdesc_close,
	.owner   = THIS_MODULE,
};

static struct miscdevice mdesc_misc = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "mdesc",
	.fops	= &mdesc_fops,
};

static int __init mdesc_misc_init(void)
{
	return misc_register(&mdesc_misc);
}

__initcall(mdesc_misc_init);
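
/* Boot-time entry point: ask the hypervisor for the machine
 * description size, fetch it into a memblock-backed handle, make it
 * the current MD, then report platform properties.
 */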
void __init sun4v_mdesc_init(void)
{
	struct mdesc_handle *hp;
	unsigned long len, real_len, status;

	(void) sun4v_mach_desc(0UL, 0UL, &len);

	printk("MDESC: Size is %lu bytes.\n", len);

	hp = mdesc_alloc(len, &memblock_mdesc_ops);
	if (hp == NULL) {
		prom_printf("MDESC: alloc of %lu bytes failed.\n", len);
		prom_halt();
	}

	status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len);
	if (status != HV_EOK || real_len > len) {
		prom_printf("sun4v_mach_desc fails, err(%lu), "
			    "len(%lu), real_len(%lu)\n",
			    status, len, real_len);
		mdesc_free(hp);
		prom_halt();
	}

	cur_mdesc = hp;

	mdesc_adi_init();
	report_platform_properties();
}