cacheinfo.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * LoongArch cacheinfo support
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/cacheinfo.h>
#include <linux/topology.h>
#include <asm/bootinfo.h>
#include <asm/cpu-info.h>
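
/*
 * Report the number of cache levels and leaves of this CPU to the generic
 * cacheinfo core, based on the cache leaves recorded in current_cpu_data.
 */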
int init_cache_level(unsigned int cpu)
{
	int cache_present = current_cpu_data.cache_leaves_present;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

	this_cpu_ci->num_levels =
		current_cpu_data.cache_leaves[cache_present - 1].level;
	this_cpu_ci->num_leaves = cache_present;

	return 0;
}
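
/*
 * Two leaves are considered shared between CPUs unless either one is
 * marked CACHE_PRIVATE in the per-leaf flags stashed in ->priv.
 */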
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	return (!(*(unsigned char *)(this_leaf->priv) & CACHE_PRIVATE)
		&& !(*(unsigned char *)(sib_leaf->priv) & CACHE_PRIVATE));
}
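
/*
 * Build shared_cpu_map for every leaf of @cpu by comparing it with the
 * same-index leaf of each other online CPU in the same NUMA node.
 */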
static void cache_cpumap_setup(unsigned int cpu)
{
	unsigned int index;
	struct cacheinfo *this_leaf, *sib_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

	for (index = 0; index < this_cpu_ci->num_leaves; index++) {
		unsigned int i;

		this_leaf = this_cpu_ci->info_list + index;

		/* skip if shared_cpu_map is already populated */
		if (!cpumask_empty(&this_leaf->shared_cpu_map))
			continue;

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list ||
			    (cpu_to_node(i) != cpu_to_node(cpu)))
				continue;

			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
	}
}
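
/*
 * Fill the generic cacheinfo leaves from the architecture's cache_desc
 * array, then wire up the per-leaf CPU sharing maps.
 */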
int populate_cache_leaves(unsigned int cpu)
{
	int i, cache_present = current_cpu_data.cache_leaves_present;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
	struct cache_desc *cd, *cdesc = current_cpu_data.cache_leaves;

	for (i = 0; i < cache_present; i++) {
		cd = cdesc + i;

		this_leaf->type = cd->type;
		this_leaf->level = cd->level;
		this_leaf->coherency_line_size = cd->linesz;
		this_leaf->number_of_sets = cd->sets;
		this_leaf->ways_of_associativity = cd->ways;
		this_leaf->size = cd->linesz * cd->sets * cd->ways;
		this_leaf->priv = &cd->flags;
		this_leaf++;
	}

	cache_cpumap_setup(cpu);
	this_cpu_ci->cpu_map_populated = true;

	return 0;
}