// SPDX-License-Identifier: GPL-2.0

#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/apic.h>

#include "local.h"
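
/*
 * In x2APIC cluster logical mode, CPUs whose logical APIC IDs share the
 * upper 16 bits (the cluster ID) form one cluster. A cluster_mask tracks
 * the CPUs of one cluster so that IPIs can be batched per cluster.
 */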
struct cluster_mask {
        unsigned int    clusterid;
        int             node;
        struct cpumask  mask;
};

/*
 * __x2apic_send_IPI_mask() possibly needs to read
 * x86_cpu_to_logical_apicid for all online cpus in a sequential way.
 * Using per cpu variable would cost one cache line per cpu.
 */
static u32 *x86_cpu_to_logical_apicid __read_mostly;

static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);
static DEFINE_PER_CPU_READ_MOSTLY(struct cluster_mask *, cluster_masks);
static struct cluster_mask *cluster_hotplug_mask;
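
/*
 * Claim this driver during MADT parsing when the CPU is already running
 * in x2APIC mode.
 */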
static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
        return x2apic_enabled();
}
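
/* Send one fixed-vector IPI to a single CPU via its cached logical ID. */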
static void x2apic_send_IPI(int cpu, int vector)
{
        u32 dest = x86_cpu_to_logical_apicid[cpu];

        /* x2apic MSRs are special and need a special fence: */
        weak_wrmsr_fence();
        __x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
}
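
/*
 * A logical destination is <cluster ID>:<bitmask of CPUs in the cluster>.
 * Logical IDs of CPUs in the same cluster differ only in the low 16 bits,
 * so ORing them together yields a single destination which addresses all
 * of them with one ICR write.
 */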
static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
        unsigned int cpu, clustercpu;
        struct cpumask *tmpmsk;
        unsigned long flags;
        u32 dest;

        /* x2apic MSRs are special and need a special fence: */
        weak_wrmsr_fence();
        local_irq_save(flags);

        tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);
        cpumask_copy(tmpmsk, mask);
        /* If IPI should not be sent to self, clear current CPU */
        if (apic_dest != APIC_DEST_ALLINC)
                __cpumask_clear_cpu(smp_processor_id(), tmpmsk);

        /* Collapse cpus in a cluster so a single IPI per cluster is sent */
        for_each_cpu(cpu, tmpmsk) {
                struct cluster_mask *cmsk = per_cpu(cluster_masks, cpu);

                dest = 0;
                for_each_cpu_and(clustercpu, tmpmsk, &cmsk->mask)
                        dest |= x86_cpu_to_logical_apicid[clustercpu];

                if (!dest)
                        continue;

                __x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
                /* Remove cluster CPUs from tmpmsk */
                cpumask_andnot(tmpmsk, tmpmsk, &cmsk->mask);
        }

        local_irq_restore(flags);
}
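
/* apic callbacks: mask based with/without self, plus the shorthand forms. */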
static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
        __x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC);
}

static void
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
        __x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_allbutself(int vector)
{
        __x2apic_send_IPI_shorthand(vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_all(int vector)
{
        __x2apic_send_IPI_shorthand(vector, APIC_DEST_ALLINC);
}
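
/* Logical destination the interrupt core uses when targeting a single CPU. */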
static u32 x2apic_calc_apicid(unsigned int cpu)
{
        return x86_cpu_to_logical_apicid[cpu];
}
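
/*
 * Cache this CPU's logical APIC ID and hook the CPU into its cluster:
 * either an existing cluster_mask with a matching cluster ID, or the
 * spare which alloc_clustermask() set aside in the prepare stage.
 */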
static void init_x2apic_ldr(void)
{
        struct cluster_mask *cmsk = this_cpu_read(cluster_masks);
        u32 cluster, apicid = apic_read(APIC_LDR);
        unsigned int cpu;

        x86_cpu_to_logical_apicid[smp_processor_id()] = apicid;

        if (cmsk)
                goto update;

        cluster = apicid >> 16;
        for_each_online_cpu(cpu) {
                cmsk = per_cpu(cluster_masks, cpu);
                /* Matching cluster found. Link and update it. */
                if (cmsk && cmsk->clusterid == cluster)
                        goto update;
        }
        cmsk = cluster_hotplug_mask;
        cmsk->clusterid = cluster;
        cluster_hotplug_mask = NULL;
update:
        this_cpu_write(cluster_masks, cmsk);
        cpumask_set_cpu(smp_processor_id(), &cmsk->mask);
}
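
/*
 * Called in the hotplug prepare stage where GFP_KERNEL allocations work.
 * init_x2apic_ldr() runs on the upcoming CPU itself and cannot allocate,
 * so a spare mask is kept around in case the CPU opens a new cluster.
 */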
static int alloc_clustermask(unsigned int cpu, int node)
{
        if (per_cpu(cluster_masks, cpu))
                return 0;
        /*
         * If a hotplug spare mask exists, check whether it's on the right
         * node. If not, free it and allocate a new one.
         */
        if (cluster_hotplug_mask) {
                if (cluster_hotplug_mask->node == node)
                        return 0;
                kfree(cluster_hotplug_mask);
        }

        cluster_hotplug_mask = kzalloc_node(sizeof(*cluster_hotplug_mask),
                                            GFP_KERNEL, node);
        if (!cluster_hotplug_mask)
                return -ENOMEM;
        cluster_hotplug_mask->node = node;
        return 0;
}
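
/* CPUHP_X2APIC_PREPARE callback: allocate the per CPU state for bring-up. */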
static int x2apic_prepare_cpu(unsigned int cpu)
{
        if (alloc_clustermask(cpu, cpu_to_node(cpu)) < 0)
                return -ENOMEM;
        if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL))
                return -ENOMEM;
        return 0;
}
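
/* Hotplug teardown: unlink the dead CPU and free its IPI scratch mask. */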
static int x2apic_dead_cpu(unsigned int dead_cpu)
{
        struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu);

        if (cmsk)
                cpumask_clear_cpu(dead_cpu, &cmsk->mask);
        free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
        return 0;
}
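
/*
 * Probe: allocate the logical APIC ID array (at least one cache line worth
 * of u32 slots, see the comment above its declaration), register the
 * hotplug callbacks and initialize the boot CPU. Returning 1 selects this
 * driver.
 */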
static int x2apic_cluster_probe(void)
{
        u32 slots;

        if (!x2apic_mode)
                return 0;

        slots = max_t(u32, L1_CACHE_BYTES/sizeof(u32), nr_cpu_ids);
        x86_cpu_to_logical_apicid = kcalloc(slots, sizeof(u32), GFP_KERNEL);
        if (!x86_cpu_to_logical_apicid)
                return 0;

        if (cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "x86/x2apic:prepare",
                              x2apic_prepare_cpu, x2apic_dead_cpu) < 0) {
                pr_err("Failed to register X2APIC_PREPARE\n");
                kfree(x86_cpu_to_logical_apicid);
                x86_cpu_to_logical_apicid = NULL;
                return 0;
        }
        init_x2apic_ldr();
        return 1;
}
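
/* The callback table which wires this driver into the generic APIC core. */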
static struct apic apic_x2apic_cluster __ro_after_init = {

        .name                           = "cluster x2apic",
        .probe                          = x2apic_cluster_probe,
        .acpi_madt_oem_check            = x2apic_acpi_madt_oem_check,
        .apic_id_valid                  = x2apic_apic_id_valid,
        .apic_id_registered             = x2apic_apic_id_registered,

        .delivery_mode                  = APIC_DELIVERY_MODE_FIXED,
        .dest_mode_logical              = true,

        .disable_esr                    = 0,

        .check_apicid_used              = NULL,
        .init_apic_ldr                  = init_x2apic_ldr,
        .ioapic_phys_id_map             = NULL,
        .setup_apic_routing             = NULL,
        .cpu_present_to_apicid          = default_cpu_present_to_apicid,
        .apicid_to_cpu_present          = NULL,
        .check_phys_apicid_present      = default_check_phys_apicid_present,
        .phys_pkg_id                    = x2apic_phys_pkg_id,

        .get_apic_id                    = x2apic_get_apic_id,
        .set_apic_id                    = x2apic_set_apic_id,

        .calc_dest_apicid               = x2apic_calc_apicid,

        .send_IPI                       = x2apic_send_IPI,
        .send_IPI_mask                  = x2apic_send_IPI_mask,
        .send_IPI_mask_allbutself       = x2apic_send_IPI_mask_allbutself,
        .send_IPI_allbutself            = x2apic_send_IPI_allbutself,
        .send_IPI_all                   = x2apic_send_IPI_all,
        .send_IPI_self                  = x2apic_send_IPI_self,

        .inquire_remote_apic            = NULL,

        .read                           = native_apic_msr_read,
        .write                          = native_apic_msr_write,
        .eoi_write                      = native_apic_msr_eoi_write,
        .icr_read                       = native_x2apic_icr_read,
        .icr_write                      = native_x2apic_icr_write,
        .wait_icr_idle                  = native_x2apic_wait_icr_idle,
        .safe_wait_icr_idle             = native_safe_x2apic_wait_icr_idle,
};

apic_driver(apic_x2apic_cluster);