// SPDX-License-Identifier: GPL-2.0
/*
 * Basic Node interface support
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/vmstat.h>
#include <linux/notifier.h>
#include <linux/node.h>
#include <linux/hugetlb.h>
#include <linux/compaction.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/swap.h>
#include <linux/slab.h>

static struct bus_type node_subsys = {
	.name = "node",
	.dev_name = "node",
};
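
/*
 * cpumap_read()/cpulist_read() back the per-node "cpumap" and "cpulist"
 * binary sysfs attributes; both intersect the node's CPU mask with
 * cpu_online_mask so that only online CPUs are reported, and they differ
 * only in output format (hex bitmask vs. range list).
 */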
static inline ssize_t cpumap_read(struct file *file, struct kobject *kobj,
				  struct bin_attribute *attr, char *buf,
				  loff_t off, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct node *node_dev = to_node(dev);
	cpumask_var_t mask;
	ssize_t n;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return 0;

	cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
	n = cpumap_print_bitmask_to_buf(buf, mask, off, count);
	free_cpumask_var(mask);

	return n;
}
static BIN_ATTR_RO(cpumap, CPUMAP_FILE_MAX_BYTES);

static inline ssize_t cpulist_read(struct file *file, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf,
				   loff_t off, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct node *node_dev = to_node(dev);
	cpumask_var_t mask;
	ssize_t n;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return 0;

	cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
	n = cpumap_print_list_to_buf(buf, mask, off, count);
	free_cpumask_var(mask);

	return n;
}
static BIN_ATTR_RO(cpulist, CPULIST_FILE_MAX_BYTES);
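
/*
 * These attributes appear under /sys/devices/system/node/nodeN/. As an
 * illustration only (output depends entirely on the topology), a 4-CPU
 * single-node machine might show:
 *
 *	$ cat /sys/devices/system/node/node0/cpulist
 *	0-3
 *	$ cat /sys/devices/system/node/node0/cpumap
 *	f
 */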

/**
 * struct node_access_nodes - Access class device to hold user visible
 *			      relationships to other nodes.
 * @dev:	Device for this memory access class
 * @list_node:	List element in the node's access list
 * @access:	The access class rank
 * @hmem_attrs: Heterogeneous memory performance attributes
 */
struct node_access_nodes {
	struct device		dev;
	struct list_head	list_node;
	unsigned int		access;
#ifdef CONFIG_HMEM_REPORTING
	struct node_hmem_attrs	hmem_attrs;
#endif
};
#define to_access_nodes(dev) container_of(dev, struct node_access_nodes, dev)

static struct attribute *node_init_access_node_attrs[] = {
	NULL,
};

static struct attribute *node_targ_access_node_attrs[] = {
	NULL,
};

static const struct attribute_group initiators = {
	.name	= "initiators",
	.attrs	= node_init_access_node_attrs,
};

static const struct attribute_group targets = {
	.name	= "targets",
	.attrs	= node_targ_access_node_attrs,
};

static const struct attribute_group *node_access_node_groups[] = {
	&initiators,
	&targets,
	NULL,
};

static void node_remove_accesses(struct node *node)
{
	struct node_access_nodes *c, *cnext;

	list_for_each_entry_safe(c, cnext, &node->access_list, list_node) {
		list_del(&c->list_node);
		device_unregister(&c->dev);
	}
}

static void node_access_release(struct device *dev)
{
	kfree(to_access_nodes(dev));
}
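
/*
 * Look up the access class device for @access on @node, creating and
 * registering it on first use. Returns NULL on allocation or
 * registration failure.
 */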
static struct node_access_nodes *node_init_node_access(struct node *node,
						       unsigned int access)
{
	struct node_access_nodes *access_node;
	struct device *dev;

	list_for_each_entry(access_node, &node->access_list, list_node)
		if (access_node->access == access)
			return access_node;

	access_node = kzalloc(sizeof(*access_node), GFP_KERNEL);
	if (!access_node)
		return NULL;

	access_node->access = access;
	dev = &access_node->dev;
	dev->parent = &node->dev;
	dev->release = node_access_release;
	dev->groups = node_access_node_groups;
	if (dev_set_name(dev, "access%u", access))
		goto free;

	if (device_register(dev))
		goto free_name;

	pm_runtime_no_callbacks(dev);
	list_add_tail(&access_node->list_node, &node->access_list);
	return access_node;
free_name:
	kfree_const(dev->kobj.name);
free:
	kfree(access_node);
	return NULL;
}
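
/*
 * Each access class registered above appears as
 * /sys/devices/system/node/nodeN/accessM/, whose "initiators" and
 * "targets" groups are populated with links by
 * register_memory_node_under_compute_node() further down.
 */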

#ifdef CONFIG_HMEM_REPORTING
#define ACCESS_ATTR(name)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr,		\
			   char *buf)					\
{									\
	return sysfs_emit(buf, "%u\n",					\
			  to_access_nodes(dev)->hmem_attrs.name);	\
}									\
static DEVICE_ATTR_RO(name)

ACCESS_ATTR(read_bandwidth);
ACCESS_ATTR(read_latency);
ACCESS_ATTR(write_bandwidth);
ACCESS_ATTR(write_latency);

static struct attribute *access_attrs[] = {
	&dev_attr_read_bandwidth.attr,
	&dev_attr_read_latency.attr,
	&dev_attr_write_bandwidth.attr,
	&dev_attr_write_latency.attr,
	NULL,
};

/**
 * node_set_perf_attrs - Set the performance values for given access class
 * @nid: Node identifier to be set
 * @hmem_attrs: Heterogeneous memory performance attributes
 * @access: The access class for the given attributes
 */
void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
			 unsigned int access)
{
	struct node_access_nodes *c;
	struct node *node;
	int i;

	if (WARN_ON_ONCE(!node_online(nid)))
		return;

	node = node_devices[nid];
	c = node_init_node_access(node, access);
	if (!c)
		return;

	c->hmem_attrs = *hmem_attrs;
	for (i = 0; access_attrs[i] != NULL; i++) {
		if (sysfs_add_file_to_group(&c->dev.kobj, access_attrs[i],
					    "initiators")) {
			pr_info("failed to add performance attribute to node %d\n",
				nid);
			break;
		}
	}
}
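
/*
 * A minimal usage sketch (hypothetical values; in practice a firmware
 * table parser such as the ACPI HMAT code supplies the real figures,
 * with bandwidth in MB/s and latency in nanoseconds):
 *
 *	struct node_hmem_attrs attrs = {
 *		.read_bandwidth	 = 1200,
 *		.write_bandwidth = 800,
 *		.read_latency	 = 100,
 *		.write_latency	 = 120,
 *	};
 *	node_set_perf_attrs(nid, &attrs, 0);	- publish as access class 0
 */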

/**
 * struct node_cache_info - Internal tracking for memory node caches
 * @dev:	Device representing the cache level
 * @node:	List element for tracking in the node
 * @cache_attrs: Attributes for this cache level
 */
struct node_cache_info {
	struct device dev;
	struct list_head node;
	struct node_cache_attrs cache_attrs;
};
#define to_cache_info(device) container_of(device, struct node_cache_info, dev)

#define CACHE_ATTR(name, fmt)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr,		\
			   char *buf)					\
{									\
	return sysfs_emit(buf, fmt "\n",				\
			  to_cache_info(dev)->cache_attrs.name);	\
}									\
static DEVICE_ATTR_RO(name);

CACHE_ATTR(size, "%llu")
CACHE_ATTR(line_size, "%u")
CACHE_ATTR(indexing, "%u")
CACHE_ATTR(write_policy, "%u")

static struct attribute *cache_attrs[] = {
	&dev_attr_indexing.attr,
	&dev_attr_size.attr,
	&dev_attr_line_size.attr,
	&dev_attr_write_policy.attr,
	NULL,
};
ATTRIBUTE_GROUPS(cache);

static void node_cache_release(struct device *dev)
{
	kfree(dev);
}

static void node_cacheinfo_release(struct device *dev)
{
	struct node_cache_info *info = to_cache_info(dev);

	kfree(info);
}
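
/*
 * Note the two release callbacks above: node->cache_dev is a bare
 * struct device allocated on its own and is simply freed, while each
 * per-level "index" device is embedded in a struct node_cache_info and
 * must free the containing structure.
 */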

static void node_init_cache_dev(struct node *node)
{
	struct device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return;

	device_initialize(dev);
	dev->parent = &node->dev;
	dev->release = node_cache_release;
	if (dev_set_name(dev, "memory_side_cache"))
		goto put_device;

	if (device_add(dev))
		goto put_device;

	pm_runtime_no_callbacks(dev);
	node->cache_dev = dev;
	return;
put_device:
	put_device(dev);
}

/**
 * node_add_cache() - add cache attribute to a memory node
 * @nid: Node identifier that has new cache attributes
 * @cache_attrs: Attributes for the cache being added
 */
void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs)
{
	struct node_cache_info *info;
	struct device *dev;
	struct node *node;

	if (!node_online(nid) || !node_devices[nid])
		return;

	node = node_devices[nid];
	list_for_each_entry(info, &node->cache_attrs, node) {
		if (info->cache_attrs.level == cache_attrs->level) {
			dev_warn(&node->dev,
				 "attempt to add duplicate cache level:%d\n",
				 cache_attrs->level);
			return;
		}
	}

	if (!node->cache_dev)
		node_init_cache_dev(node);
	if (!node->cache_dev)
		return;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return;

	dev = &info->dev;
	device_initialize(dev);
	dev->parent = node->cache_dev;
	dev->release = node_cacheinfo_release;
	dev->groups = cache_groups;
	if (dev_set_name(dev, "index%d", cache_attrs->level))
		goto put_device;

	info->cache_attrs = *cache_attrs;
	if (device_add(dev)) {
		dev_warn(&node->dev, "failed to add cache level:%d\n",
			 cache_attrs->level);
		goto put_device;
	}
	pm_runtime_no_callbacks(dev);
	list_add_tail(&info->node, &node->cache_attrs);
	return;
put_device:
	put_device(dev);
}
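
/*
 * The resulting layout, roughly, is
 * /sys/devices/system/node/nodeN/memory_side_cache/indexL/ with "size",
 * "line_size", "indexing" and "write_policy" attributes for each cache
 * level L added via node_add_cache().
 */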

static void node_remove_caches(struct node *node)
{
	struct node_cache_info *info, *next;

	if (!node->cache_dev)
		return;

	list_for_each_entry_safe(info, next, &node->cache_attrs, node) {
		list_del(&info->node);
		device_unregister(&info->dev);
	}
	device_unregister(node->cache_dev);
}

static void node_init_caches(unsigned int nid)
{
	INIT_LIST_HEAD(&node_devices[nid]->cache_attrs);
}
#else
static void node_init_caches(unsigned int nid) { }
static void node_remove_caches(struct node *node) { }
#endif
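
/* K(x) converts a page count to KiB: x << (PAGE_SHIFT - 10) == x * PAGE_SIZE / 1024. */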
#define K(x) ((x) << (PAGE_SHIFT - 10))
static ssize_t node_read_meminfo(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	int len = 0;
	int nid = dev->id;
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct sysinfo i;
	unsigned long sreclaimable, sunreclaimable;
	unsigned long swapcached = 0;

	si_meminfo_node(&i, nid);
	sreclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B);
	sunreclaimable = node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B);
#ifdef CONFIG_SWAP
	swapcached = node_page_state_pages(pgdat, NR_SWAPCACHE);
#endif
	len = sysfs_emit_at(buf, len,
			    "Node %d MemTotal:       %8lu kB\n"
			    "Node %d MemFree:        %8lu kB\n"
			    "Node %d MemUsed:        %8lu kB\n"
			    "Node %d SwapCached:     %8lu kB\n"
			    "Node %d Active:         %8lu kB\n"
			    "Node %d Inactive:       %8lu kB\n"
			    "Node %d Active(anon):   %8lu kB\n"
			    "Node %d Inactive(anon): %8lu kB\n"
			    "Node %d Active(file):   %8lu kB\n"
			    "Node %d Inactive(file): %8lu kB\n"
			    "Node %d Unevictable:    %8lu kB\n"
			    "Node %d Mlocked:        %8lu kB\n",
			    nid, K(i.totalram),
			    nid, K(i.freeram),
			    nid, K(i.totalram - i.freeram),
			    nid, K(swapcached),
			    nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) +
				   node_page_state(pgdat, NR_ACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) +
				   node_page_state(pgdat, NR_INACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)),
			    nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)),
			    nid, K(node_page_state(pgdat, NR_ACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_INACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_UNEVICTABLE)),
			    nid, K(sum_zone_node_page_state(nid, NR_MLOCK)));

#ifdef CONFIG_HIGHMEM
	len += sysfs_emit_at(buf, len,
			     "Node %d HighTotal:      %8lu kB\n"
			     "Node %d HighFree:       %8lu kB\n"
			     "Node %d LowTotal:       %8lu kB\n"
			     "Node %d LowFree:        %8lu kB\n",
			     nid, K(i.totalhigh),
			     nid, K(i.freehigh),
			     nid, K(i.totalram - i.totalhigh),
			     nid, K(i.freeram - i.freehigh));
#endif
	len += sysfs_emit_at(buf, len,
			     "Node %d Dirty:          %8lu kB\n"
			     "Node %d Writeback:      %8lu kB\n"
			     "Node %d FilePages:      %8lu kB\n"
			     "Node %d Mapped:         %8lu kB\n"
			     "Node %d AnonPages:      %8lu kB\n"
			     "Node %d Shmem:          %8lu kB\n"
			     "Node %d KernelStack:    %8lu kB\n"
#ifdef CONFIG_SHADOW_CALL_STACK
			     "Node %d ShadowCallStack:%8lu kB\n"
#endif
			     "Node %d PageTables:     %8lu kB\n"
			     "Node %d SecPageTables:  %8lu kB\n"
			     "Node %d NFS_Unstable:   %8lu kB\n"
			     "Node %d Bounce:         %8lu kB\n"
			     "Node %d WritebackTmp:   %8lu kB\n"
			     "Node %d KReclaimable:   %8lu kB\n"
			     "Node %d Slab:           %8lu kB\n"
			     "Node %d SReclaimable:   %8lu kB\n"
			     "Node %d SUnreclaim:     %8lu kB\n"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			     "Node %d AnonHugePages:  %8lu kB\n"
			     "Node %d ShmemHugePages: %8lu kB\n"
			     "Node %d ShmemPmdMapped: %8lu kB\n"
			     "Node %d FileHugePages:  %8lu kB\n"
			     "Node %d FilePmdMapped:  %8lu kB\n"
#endif
			     ,
			     nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
			     nid, K(node_page_state(pgdat, NR_WRITEBACK)),
			     nid, K(node_page_state(pgdat, NR_FILE_PAGES)),
			     nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
			     nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
			     nid, K(i.sharedram),
			     nid, node_page_state(pgdat, NR_KERNEL_STACK_KB),
#ifdef CONFIG_SHADOW_CALL_STACK
			     nid, node_page_state(pgdat, NR_KERNEL_SCS_KB),
#endif
			     nid, K(node_page_state(pgdat, NR_PAGETABLE)),
			     nid, K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
			     nid, 0UL,
			     nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
			     nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
			     nid, K(sreclaimable +
				    node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
			     nid, K(sreclaimable + sunreclaimable),
			     nid, K(sreclaimable),
			     nid, K(sunreclaimable)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			     ,
			     nid, K(node_page_state(pgdat, NR_ANON_THPS)),
			     nid, K(node_page_state(pgdat, NR_SHMEM_THPS)),
			     nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
			     nid, K(node_page_state(pgdat, NR_FILE_THPS)),
			     nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED))
#endif
			    );
	len += hugetlb_report_node_meminfo(buf, len, nid);
	return len;
}
#undef K
static DEVICE_ATTR(meminfo, 0444, node_read_meminfo, NULL);

static ssize_t node_read_numastat(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	fold_vm_numa_events();
	return sysfs_emit(buf,
			  "numa_hit %lu\n"
			  "numa_miss %lu\n"
			  "numa_foreign %lu\n"
			  "interleave_hit %lu\n"
			  "local_node %lu\n"
			  "other_node %lu\n",
			  sum_zone_numa_event_state(dev->id, NUMA_HIT),
			  sum_zone_numa_event_state(dev->id, NUMA_MISS),
			  sum_zone_numa_event_state(dev->id, NUMA_FOREIGN),
			  sum_zone_numa_event_state(dev->id, NUMA_INTERLEAVE_HIT),
			  sum_zone_numa_event_state(dev->id, NUMA_LOCAL),
			  sum_zone_numa_event_state(dev->id, NUMA_OTHER));
}
static DEVICE_ATTR(numastat, 0444, node_read_numastat, NULL);

static ssize_t node_read_vmstat(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	int nid = dev->id;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int i;
	int len = 0;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		len += sysfs_emit_at(buf, len, "%s %lu\n",
				     zone_stat_name(i),
				     sum_zone_node_page_state(nid, i));

#ifdef CONFIG_NUMA
	fold_vm_numa_events();
	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
		len += sysfs_emit_at(buf, len, "%s %lu\n",
				     numa_stat_name(i),
				     sum_zone_numa_event_state(nid, i));
#endif

	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
		unsigned long pages = node_page_state_pages(pgdat, i);

		if (vmstat_item_print_in_thp(i))
			pages /= HPAGE_PMD_NR;
		len += sysfs_emit_at(buf, len, "%s %lu\n", node_stat_name(i),
				     pages);
	}

	return len;
}
static DEVICE_ATTR(vmstat, 0444, node_read_vmstat, NULL);

static ssize_t node_read_distance(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int nid = dev->id;
	int len = 0;
	int i;

	/*
	 * buf is currently PAGE_SIZE in length and each node needs 4 chars
	 * at the most (distance + space or newline).
	 */
	BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE);

	for_each_online_node(i) {
		len += sysfs_emit_at(buf, len, "%s%d",
				     i ? " " : "", node_distance(nid, i));
	}

	len += sysfs_emit_at(buf, len, "\n");
	return len;
}
static DEVICE_ATTR(distance, 0444, node_read_distance, NULL);
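
/*
 * Example (topology-dependent): on a two-node machine,
 * "cat /sys/devices/system/node/node0/distance" might print "10 21",
 * i.e. the SLIT-style distances from node 0 to nodes 0 and 1.
 */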

static struct attribute *node_dev_attrs[] = {
	&dev_attr_meminfo.attr,
	&dev_attr_numastat.attr,
	&dev_attr_distance.attr,
	&dev_attr_vmstat.attr,
	NULL
};

static struct bin_attribute *node_dev_bin_attrs[] = {
	&bin_attr_cpumap,
	&bin_attr_cpulist,
	NULL
};

static const struct attribute_group node_dev_group = {
	.attrs = node_dev_attrs,
	.bin_attrs = node_dev_bin_attrs
};

static const struct attribute_group *node_dev_groups[] = {
	&node_dev_group,
#ifdef CONFIG_HAVE_ARCH_NODE_DEV_GROUP
	&arch_node_dev_group,
#endif
	NULL
};

static void node_device_release(struct device *dev)
{
	kfree(to_node(dev));
}

/*
 * register_node - Setup a sysfs device for a node.
 * @num - Node number to use when creating the device.
 *
 * Initialize and register the node device.
 */
static int register_node(struct node *node, int num)
{
	int error;

	node->dev.id = num;
	node->dev.bus = &node_subsys;
	node->dev.release = node_device_release;
	node->dev.groups = node_dev_groups;

	error = device_register(&node->dev);
	if (error) {
		put_device(&node->dev);
	} else {
		hugetlb_register_node(node);
		compaction_register_node(node);
	}

	return error;
}

/**
 * unregister_node - unregister a node device
 * @node: node going away
 *
 * Unregisters a node device @node.  All the devices on the node must be
 * unregistered before calling this function.
 */
void unregister_node(struct node *node)
{
	hugetlb_unregister_node(node);
	compaction_unregister_node(node);
	node_remove_accesses(node);
	node_remove_caches(node);
	device_unregister(&node->dev);
}

struct node *node_devices[MAX_NUMNODES];

/*
 * register cpu under node
 */
int register_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	int ret;
	struct device *obj;

	if (!node_online(nid))
		return 0;

	obj = get_cpu_device(cpu);
	if (!obj)
		return 0;

	ret = sysfs_create_link(&node_devices[nid]->dev.kobj,
				&obj->kobj,
				kobject_name(&obj->kobj));
	if (ret)
		return ret;

	return sysfs_create_link(&obj->kobj,
				 &node_devices[nid]->dev.kobj,
				 kobject_name(&node_devices[nid]->dev.kobj));
}
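
/*
 * The two links created above are symmetric. For, say, CPU 1 on node 0
 * they would look roughly like:
 *
 *	/sys/devices/system/node/node0/cpu1 -> ../../cpu/cpu1
 *	/sys/devices/system/cpu/cpu1/node0  -> ../../node/node0
 */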

/**
 * register_memory_node_under_compute_node - link memory node to its compute
 *					     node for a given access class.
 * @mem_nid:	Memory node number
 * @cpu_nid:	Cpu node number
 * @access:	Access class to register
 *
 * Description:
 *	For use with platforms that may have separate memory and compute nodes.
 *	This function will export node relationships linking which memory
 *	initiator nodes can access memory targets at a given ranked access
 *	class.
 */
int register_memory_node_under_compute_node(unsigned int mem_nid,
					    unsigned int cpu_nid,
					    unsigned int access)
{
	struct node *init_node, *targ_node;
	struct node_access_nodes *initiator, *target;
	int ret;

	if (!node_online(cpu_nid) || !node_online(mem_nid))
		return -ENODEV;

	init_node = node_devices[cpu_nid];
	targ_node = node_devices[mem_nid];
	initiator = node_init_node_access(init_node, access);
	target = node_init_node_access(targ_node, access);
	if (!initiator || !target)
		return -ENOMEM;

	ret = sysfs_add_link_to_group(&initiator->dev.kobj, "targets",
				      &targ_node->dev.kobj,
				      dev_name(&targ_node->dev));
	if (ret)
		return ret;

	ret = sysfs_add_link_to_group(&target->dev.kobj, "initiators",
				      &init_node->dev.kobj,
				      dev_name(&init_node->dev));
	if (ret)
		goto err;

	return 0;
err:
	sysfs_remove_link_from_group(&initiator->dev.kobj, "targets",
				     dev_name(&targ_node->dev));
	return ret;
}
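
/*
 * On success the initiator node gains an accessM/targets/nodeT link and
 * the target node gains an accessM/initiators/nodeI link, e.g.
 * (hypothetical node ids) node0/access0/targets/node1 and
 * node1/access0/initiators/node0.
 */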

int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	struct device *obj;

	if (!node_online(nid))
		return 0;

	obj = get_cpu_device(cpu);
	if (!obj)
		return 0;

	sysfs_remove_link(&node_devices[nid]->dev.kobj,
			  kobject_name(&obj->kobj));
	sysfs_remove_link(&obj->kobj,
			  kobject_name(&node_devices[nid]->dev.kobj));

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static int __ref get_nid_for_pfn(unsigned long pfn)
{
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	if (system_state < SYSTEM_RUNNING)
		return early_pfn_to_nid(pfn);
#endif
	return pfn_to_nid(pfn);
}
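
/*
 * Create the bidirectional nodeN <-> memoryX sysfs links for a memory
 * block; -EEXIST is tolerated since the links may already exist.
 */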
static void do_register_memory_block_under_node(int nid,
						struct memory_block *mem_blk,
						enum meminit_context context)
{
	int ret;

	memory_block_add_nid(mem_blk, nid, context);

	ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
				       &mem_blk->dev.kobj,
				       kobject_name(&mem_blk->dev.kobj));
	if (ret && ret != -EEXIST)
		dev_err_ratelimited(&node_devices[nid]->dev,
				    "can't create link to %s in sysfs (%d)\n",
				    kobject_name(&mem_blk->dev.kobj), ret);

	ret = sysfs_create_link_nowarn(&mem_blk->dev.kobj,
				       &node_devices[nid]->dev.kobj,
				       kobject_name(&node_devices[nid]->dev.kobj));
	if (ret && ret != -EEXIST)
		dev_err_ratelimited(&mem_blk->dev,
				    "can't create link to %s in sysfs (%d)\n",
				    kobject_name(&node_devices[nid]->dev.kobj),
				    ret);
}

/* register memory section under specified node if it spans that node */
static int register_mem_block_under_node_early(struct memory_block *mem_blk,
					       void *arg)
{
	unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
	unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
	unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
	int nid = *(int *)arg;
	unsigned long pfn;

	for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
		int page_nid;

		/*
		 * A memory block can have several absent sections at its
		 * start; skip the pfn range of any absent section.
		 */
		if (!pfn_in_present_section(pfn)) {
			pfn = round_down(pfn + PAGES_PER_SECTION,
					 PAGES_PER_SECTION) - 1;
			continue;
		}

		/*
		 * We need to check whether the page belongs to @nid only in
		 * the boot case, because nodes' ranges can be interleaved.
		 */
		page_nid = get_nid_for_pfn(pfn);
		if (page_nid < 0)
			continue;
		if (page_nid != nid)
			continue;

		do_register_memory_block_under_node(nid, mem_blk, MEMINIT_EARLY);
		return 0;
	}
	/* mem section does not span the specified node */
	return 0;
}

/*
 * During hotplug we know that all pages in the memory block belong to the same
 * node.
 */
static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
						 void *arg)
{
	int nid = *(int *)arg;

	do_register_memory_block_under_node(nid, mem_blk, MEMINIT_HOTPLUG);
	return 0;
}

/*
 * Unregister a memory block device under the node it spans. Memory blocks
 * with multiple nodes cannot be offlined and therefore can never be removed.
 */
void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
{
	if (mem_blk->nid == NUMA_NO_NODE)
		return;

	sysfs_remove_link(&node_devices[mem_blk->nid]->dev.kobj,
			  kobject_name(&mem_blk->dev.kobj));
	sysfs_remove_link(&mem_blk->dev.kobj,
			  kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
}

void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
				       unsigned long end_pfn,
				       enum meminit_context context)
{
	walk_memory_blocks_func_t func;

	if (context == MEMINIT_HOTPLUG)
		func = register_mem_block_under_node_hotplug;
	else
		func = register_mem_block_under_node_early;

	walk_memory_blocks(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),
			   (void *)&nid, func);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

int __register_one_node(int nid)
{
	int error;
	int cpu;

	node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL);
	if (!node_devices[nid])
		return -ENOMEM;

	error = register_node(node_devices[nid], nid);

	/* link cpu under this node */
	for_each_present_cpu(cpu) {
		if (cpu_to_node(cpu) == nid)
			register_cpu_under_node(cpu, nid);
	}

	INIT_LIST_HEAD(&node_devices[nid]->access_list);
	node_init_caches(nid);

	return error;
}

void unregister_one_node(int nid)
{
	if (!node_devices[nid])
		return;

	unregister_node(node_devices[nid]);
	node_devices[nid] = NULL;
}

/*
 * node states attributes
 */

struct node_attr {
	struct device_attribute attr;
	enum node_states state;
};

static ssize_t show_node_state(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct node_attr *na = container_of(attr, struct node_attr, attr);

	return sysfs_emit(buf, "%*pbl\n",
			  nodemask_pr_args(&node_states[na->state]));
}

#define _NODE_ATTR(name, state) \
	{ __ATTR(name, 0444, show_node_state, NULL), state }

static struct node_attr node_state_attr[] = {
	[N_POSSIBLE] = _NODE_ATTR(possible, N_POSSIBLE),
	[N_ONLINE] = _NODE_ATTR(online, N_ONLINE),
	[N_NORMAL_MEMORY] = _NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = _NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
#endif
	[N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY),
	[N_CPU] = _NODE_ATTR(has_cpu, N_CPU),
	[N_GENERIC_INITIATOR] = _NODE_ATTR(has_generic_initiator,
					   N_GENERIC_INITIATOR),
};
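
/*
 * node_state_attr[] is indexed by enum node_states and must stay in sync
 * with node_state_attrs[] below; node_dev_init() asserts both sizes with
 * BUILD_BUG_ON().
 */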
static struct attribute *node_state_attrs[] = {
	&node_state_attr[N_POSSIBLE].attr.attr,
	&node_state_attr[N_ONLINE].attr.attr,
	&node_state_attr[N_NORMAL_MEMORY].attr.attr,
#ifdef CONFIG_HIGHMEM
	&node_state_attr[N_HIGH_MEMORY].attr.attr,
#endif
	&node_state_attr[N_MEMORY].attr.attr,
	&node_state_attr[N_CPU].attr.attr,
	&node_state_attr[N_GENERIC_INITIATOR].attr.attr,
	NULL
};

static const struct attribute_group memory_root_attr_group = {
	.attrs = node_state_attrs,
};

static const struct attribute_group *cpu_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};

void __init node_dev_init(void)
{
	int ret, i;

	BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
	BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs) - 1 != NR_NODE_STATES);

	ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
	if (ret)
		panic("%s() failed to register subsystem: %d\n", __func__, ret);

	/*
	 * Create all node devices, which will properly link the node
	 * to applicable memory block devices and already created cpu devices.
	 */
	for_each_online_node(i) {
		ret = register_one_node(i);
		if (ret)
			panic("%s() failed to add node: %d\n", __func__, ret);
	}
}