cacheinfo.c
// SPDX-License-Identifier: GPL-2.0
/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <[email protected]>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)
#define per_cpu_cacheinfo_idx(cpu, idx)	\
				(per_cpu_cacheinfo(cpu) + (idx))

struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}
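
/*
 * Two leaves are considered to describe the same cache if firmware gave
 * both of them an ID (compare the IDs) or, failing that, if they carry
 * the same fw_token (the DT node or ACPI PPTT entry they came from).
 */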
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non DT/ACPI systems, assume unique level 1 caches,
	 * system-wide shared caches for all other levels.
	 */
	if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)))
		return (this_leaf->level != 1) && (sib_leaf->level != 1);

	if ((sib_leaf->attributes & CACHE_ID) &&
	    (this_leaf->attributes & CACHE_ID))
		return sib_leaf->id == this_leaf->id;

	return sib_leaf->fw_token == this_leaf->fw_token;
}
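
/*
 * A CPU's last-level cache leaf is "valid" once firmware has tagged it
 * with either a cache ID or a fw_token; until then the sharing checks
 * above would have nothing meaningful to compare.
 */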
bool last_level_cache_is_valid(unsigned int cpu)
{
	struct cacheinfo *llc;

	if (!cache_leaves(cpu))
		return false;

	llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);

	return (llc->attributes & CACHE_ID) || !!llc->fw_token;
}

bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y)
{
	struct cacheinfo *llc_x, *llc_y;

	if (!last_level_cache_is_valid(cpu_x) ||
	    !last_level_cache_is_valid(cpu_y))
		return false;

	llc_x = per_cpu_cacheinfo_idx(cpu_x, cache_leaves(cpu_x) - 1);
	llc_y = per_cpu_cacheinfo_idx(cpu_y, cache_leaves(cpu_y) - 1);

	return cache_leaves_are_shared(llc_x, llc_y);
}
#ifdef CONFIG_OF
/* OF properties to query for a given cache type */
struct cache_type_info {
	const char *size_prop;
	const char *line_size_props[2];
	const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
	{
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	}, {
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	}, {
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};
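
/*
 * Map enum cache_type onto the table above: CACHE_TYPE_INST (1) and
 * CACHE_TYPE_DATA (2) index their entries directly, while unified
 * caches use the generic "cache-*" properties in slot 0.
 */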
static inline int get_cacheinfo_idx(enum cache_type type)
{
	if (type == CACHE_TYPE_UNIFIED)
		return 0;
	return type;
}

static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].size_prop;

	of_property_read_u32(np, propname, &this_leaf->size);
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
				struct device_node *np)
{
	int i, lim, ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

	for (i = 0; i < lim; i++) {
		int ret;
		u32 line_size;
		const char *propname;

		propname = cache_type_info[ct_idx].line_size_props[i];
		ret = of_property_read_u32(np, propname, &line_size);
		if (!ret) {
			this_leaf->coherency_line_size = line_size;
			break;
		}
	}
}

static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].nr_sets_prop;

	of_property_read_u32(np, propname, &this_leaf->number_of_sets);
}
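
/*
 * Derive the ways of associativity from the properties read above:
 *   ways = size / (number_of_sets * line_size)
 * A "cache-sets" value of 1 denotes a fully associative cache, in which
 * case ways_of_associativity is deliberately left at 0.
 */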
static void cache_associativity(struct cacheinfo *this_leaf)
{
	unsigned int line_size = this_leaf->coherency_line_size;
	unsigned int nr_sets = this_leaf->number_of_sets;
	unsigned int size = this_leaf->size;

	/*
	 * If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))
		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}

static bool cache_node_is_unified(struct cacheinfo *this_leaf,
				  struct device_node *np)
{
	return of_property_read_bool(np, "cache-unified");
}
static void cache_of_set_props(struct cacheinfo *this_leaf,
			       struct device_node *np)
{
	/*
	 * init_cache_level must set up the cache level correctly,
	 * overriding the architecturally specified levels, so
	 * if type is NONE at this stage, it should be unified
	 */
	if (this_leaf->type == CACHE_TYPE_NOCACHE &&
	    cache_node_is_unified(this_leaf, np))
		this_leaf->type = CACHE_TYPE_UNIFIED;

	cache_size(this_leaf, np);
	cache_get_line_size(this_leaf, np);
	cache_nr_sets(this_leaf, np);
	cache_associativity(this_leaf);
}

static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np;
	struct cacheinfo *this_leaf;
	unsigned int index = 0;

	np = of_cpu_device_node_get(cpu);
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	while (index < cache_leaves(cpu)) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
		if (this_leaf->level != 1)
			np = of_find_next_cache_node(np);
		else
			np = of_node_get(np); /* cpu node itself */
		if (!np)
			break;
		cache_of_set_props(this_leaf, np);
		this_leaf->fw_token = np;
		index++;
	}

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
#endif
int __weak cache_setup_acpi(unsigned int cpu)
{
	return -ENOTSUPP;
}
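
/*
 * Largest coherency_line_size seen on any leaf so far; updated in
 * cache_shared_cpu_map_setup() as leaves are populated.
 */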
unsigned int coherency_max_size;
static int cache_setup_properties(unsigned int cpu)
{
	int ret = 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		ret = cache_setup_acpi(cpu);

	return ret;
}
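
/*
 * Build shared_cpu_map for every leaf of this CPU by comparing it
 * against the leaves of every other online CPU at the same level and
 * type; matching leaves get each other's CPU set in their masks.
 */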
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index, sib_index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	/*
	 * Skip setting up the cache properties if the LLC is already
	 * valid: we then only need to update the shared cpu_map. This
	 * happens when the cache attributes were populated early,
	 * before all the CPUs were brought online.
	 */
	if (!last_level_cache_is_valid(cpu)) {
		ret = cache_setup_properties(cpu);
		if (ret)
			return ret;
	}

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = per_cpu_cacheinfo_idx(cpu, index);

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */

			for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
				sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);

				/*
				 * Comparing cache IDs only makes sense if the leaves
				 * belong to the same cache level of same type. Skip
				 * the check if level and type do not match.
				 */
				if (sib_leaf->level != this_leaf->level ||
				    sib_leaf->type != this_leaf->type)
					continue;

				if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
					cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
					cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
					break;
				}
			}
		}
		/* record the maximum cache line size */
		if (this_leaf->coherency_line_size > coherency_max_size)
			coherency_max_size = this_leaf->coherency_line_size;
	}

	return 0;
}
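
/*
 * Inverse of the setup above, called on hot-unplug: drop this CPU from
 * the shared_cpu_map of all sibling leaves and release the DT node
 * references taken in cache_setup_of_node().
 */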
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index, sib_index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci =
						get_cpu_cacheinfo(sibling);

			if (sibling == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */

			for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
				sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index);

				/*
				 * Comparing cache IDs only makes sense if the leaves
				 * belong to the same cache level of same type. Skip
				 * the check if level and type do not match.
				 */
				if (sib_leaf->level != this_leaf->level ||
				    sib_leaf->type != this_leaf->type)
					continue;

				if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
					cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
					cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
					break;
				}
			}
		}
		if (of_have_populated_dt())
			of_node_put(this_leaf->fw_token);
	}
}
static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);

	kfree(per_cpu_cacheinfo(cpu));
	per_cpu_cacheinfo(cpu) = NULL;
	cache_leaves(cpu) = 0;
}
int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}
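
/*
 * Architectures are expected to override the two weak hooks above:
 * init_cache_level() fills in num_levels/num_leaves, and
 * populate_cache_leaves() describes each leaf. A minimal sketch, for a
 * hypothetical architecture with one unified L1 per CPU (real
 * implementations read ID registers or firmware tables instead):
 *
 *	int init_cache_level(unsigned int cpu)
 *	{
 *		struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 *
 *		this_cpu_ci->num_levels = 1;
 *		this_cpu_ci->num_leaves = 1;
 *		return 0;
 *	}
 *
 *	int populate_cache_leaves(unsigned int cpu)
 *	{
 *		// info_list was allocated by detect_cache_attributes()
 *		struct cacheinfo *leaf = get_cpu_cacheinfo(cpu)->info_list;
 *
 *		leaf->type = CACHE_TYPE_UNIFIED;
 *		leaf->level = 1;
 *		return 0;
 *	}
 */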
int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	/* Since early detection of the cacheinfo is allowed via this
	 * function, which is also called as a CPU hotplug callback via
	 * cacheinfo_cpu_online, the initialisation can be skipped: only
	 * the CPU maps need updating, as the CPU online status will
	 * already have been updated when called via the
	 * cacheinfo_cpu_online path.
	 */
	if (per_cpu_cacheinfo(cpu))
		goto update_cpu_map;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_ATOMIC);
	if (per_cpu_cacheinfo(cpu) == NULL) {
		cache_leaves(cpu) = 0;
		return -ENOMEM;
	}

	/*
	 * populate_cache_leaves() may completely set up the cache leaves
	 * and shared_cpu_map, or it may leave them partially set up.
	 */
	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;

update_cpu_map:
	/*
	 * For systems using DT for cache hierarchy, fw_token
	 * and shared_cpu_map will be set up here only if they are
	 * not populated already
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}

	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}
/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])
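
/*
 * show_one() stamps out a sysfs show() handler that prints a single
 * unsigned member of the cacheinfo leaf attached to the device.
 */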
#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sysfs_emit(buf, "%u\n", this_leaf->object);	\
}

show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);
static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pb\n", nr_cpu_ids, mask);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pbl\n", nr_cpu_ids, mask);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const char *output;

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		output = "Data";
		break;
	case CACHE_TYPE_INST:
		output = "Instruction";
		break;
	case CACHE_TYPE_UNIFIED:
		output = "Unified";
		break;
	default:
		return -EINVAL;
	}

	return sysfs_emit(buf, "%s\n", output);
}
static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	const char *output;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		output = "ReadWriteAllocate";
	else if (ci_attr & CACHE_READ_ALLOCATE)
		output = "ReadAllocate";
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		output = "WriteAllocate";
	else
		return 0;

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sysfs_emit(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sysfs_emit(buf, "WriteBack\n");
	return n;
}
static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};
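
/*
 * Hide attributes for which firmware provided no value instead of
 * showing them as zero. ways_of_associativity is the one exception:
 * it stays visible whenever the size is known, because 0 ways encodes
 * "fully associative".
 */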
static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
		return mode;
	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Placeholder for private group */
	NULL,
};
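
/*
 * Weak hook so an architecture can attach extra, arch-specific
 * attributes to a given leaf; the default is no private group.
 */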
const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}
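
/*
 * The devices registered below produce the familiar sysfs hierarchy,
 * e.g. /sys/devices/system/cpu/cpuX/cache/indexY/{type,level,size,
 * shared_cpu_map,...}, with one indexY directory per cache leaf.
 */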
/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}
static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, i);
		if (this_leaf->disable_sysfs)
			continue;
		if (this_leaf->type == CACHE_TYPE_NOCACHE)
			break;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}
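
/*
 * CPU hotplug callbacks: detect the cache hierarchy and create the
 * sysfs nodes when a CPU comes online, and tear them down again before
 * it goes offline.
 */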
static int cacheinfo_cpu_online(unsigned int cpu)
{
	int rc = detect_cache_attributes(cpu);

	if (rc)
		return rc;
	rc = cache_add_dev(cpu);
	if (rc)
		free_cache_attributes(cpu);
	return rc;
}

static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
		cpu_cache_sysfs_exit(cpu);

	free_cache_attributes(cpu);
	return 0;
}
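
/*
 * Registering a CPUHP_AP state hooks the callbacks above into CPU
 * bring-up and tear-down; cpuhp_setup_state() also invokes the online
 * callback for every CPU that is already up when this initcall runs.
 */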
static int __init cacheinfo_sysfs_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
				 "base/cacheinfo:online",
				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);