/* numa.c */
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. *
  4. * ia64 kernel NUMA specific stuff
  5. *
  6. * Copyright (C) 2002 Erich Focht <[email protected]>
  7. * Copyright (C) 2004 Silicon Graphics, Inc.
  8. * Jesse Barnes <[email protected]>
  9. */
#include <linux/topology.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <asm/smp.h>

/*
 * Per-CPU NUMA node id.  Written by map_cpu_to_node()/unmap_cpu_from_node()
 * and built at boot by build_cpu_to_node_map(); exported for module use.
 */
u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_to_node_map);

/* For each node, the mask of CPUs currently mapped to it (inverse view). */
cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
EXPORT_SYMBOL(node_to_cpu_mask);
  18. void map_cpu_to_node(int cpu, int nid)
  19. {
  20. int oldnid;
  21. if (nid < 0) { /* just initialize by zero */
  22. cpu_to_node_map[cpu] = 0;
  23. return;
  24. }
  25. /* sanity check first */
  26. oldnid = cpu_to_node_map[cpu];
  27. if (cpumask_test_cpu(cpu, &node_to_cpu_mask[oldnid])) {
  28. return; /* nothing to do */
  29. }
  30. /* we don't have cpu-driven node hot add yet...
  31. In usual case, node is created from SRAT at boot time. */
  32. if (!node_online(nid))
  33. nid = first_online_node;
  34. cpu_to_node_map[cpu] = nid;
  35. cpumask_set_cpu(cpu, &node_to_cpu_mask[nid]);
  36. return;
  37. }
  38. void unmap_cpu_from_node(int cpu, int nid)
  39. {
  40. WARN_ON(!cpumask_test_cpu(cpu, &node_to_cpu_mask[nid]));
  41. WARN_ON(cpu_to_node_map[cpu] != nid);
  42. cpu_to_node_map[cpu] = 0;
  43. cpumask_clear_cpu(cpu, &node_to_cpu_mask[nid]);
  44. }
  45. /**
  46. * build_cpu_to_node_map - setup cpu to node and node to cpumask arrays
  47. *
  48. * Build cpu to node mapping and initialize the per node cpu masks using
  49. * info from the node_cpuid array handed to us by ACPI.
  50. */
  51. void __init build_cpu_to_node_map(void)
  52. {
  53. int cpu, i, node;
  54. for(node=0; node < MAX_NUMNODES; node++)
  55. cpumask_clear(&node_to_cpu_mask[node]);
  56. for_each_possible_early_cpu(cpu) {
  57. node = NUMA_NO_NODE;
  58. for (i = 0; i < NR_CPUS; ++i)
  59. if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
  60. node = node_cpuid[i].nid;
  61. break;
  62. }
  63. map_cpu_to_node(cpu, node);
  64. }
  65. }