  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright IBM Corp. 2008
  4. * Author(s): Martin Schwidefsky ([email protected])
  5. */
  6. #define KMSG_COMPONENT "cpu"
  7. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  8. #include <linux/stop_machine.h>
  9. #include <linux/bitops.h>
  10. #include <linux/kernel.h>
  11. #include <linux/random.h>
  12. #include <linux/sched/mm.h>
  13. #include <linux/init.h>
  14. #include <linux/seq_file.h>
  15. #include <linux/mm_types.h>
  16. #include <linux/delay.h>
  17. #include <linux/cpu.h>
  18. #include <asm/diag.h>
  19. #include <asm/facility.h>
  20. #include <asm/elf.h>
  21. #include <asm/lowcore.h>
  22. #include <asm/param.h>
  23. #include <asm/sclp.h>
  24. #include <asm/smp.h>
/* AT_HWCAP bits reported to user space; filled in by setup_hwcaps(). */
unsigned long __read_mostly elf_hwcap;

/* AT_PLATFORM string (e.g. "z15"); filled in by setup_elf_platform(). */
char elf_platform[ELF_PLATFORM_SIZE];

/* Per-CPU identification and clock-speed information. */
struct cpu_info {
	unsigned int cpu_mhz_dynamic;	/* current clock in MHz (ECAG high word) */
	unsigned int cpu_mhz_static;	/* nominal clock in MHz (ECAG low word) */
	struct cpuid cpu_id;		/* CPU id read via get_cpu_id() */
};

static DEFINE_PER_CPU(struct cpu_info, cpu_info);
/* Spin counter used by stop_machine_yield() before yielding the CPU. */
static DEFINE_PER_CPU(int, cpu_relax_retry);
/* Set by cpu_detect_mhz_feature() when facility 34 is installed and
 * ECAG_CPU_ATTRIBUTE returns a valid (non -1UL) value. */
static bool machine_has_cpu_mhz;
  35. void __init cpu_detect_mhz_feature(void)
  36. {
  37. if (test_facility(34) && __ecag(ECAG_CPU_ATTRIBUTE, 0) != -1UL)
  38. machine_has_cpu_mhz = true;
  39. }
  40. static void update_cpu_mhz(void *arg)
  41. {
  42. unsigned long mhz;
  43. struct cpu_info *c;
  44. mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0);
  45. c = this_cpu_ptr(&cpu_info);
  46. c->cpu_mhz_dynamic = mhz >> 32;
  47. c->cpu_mhz_static = mhz & 0xffffffff;
  48. }
  49. void s390_update_cpu_mhz(void)
  50. {
  51. s390_adjust_jiffies();
  52. if (machine_has_cpu_mhz)
  53. on_each_cpu(update_cpu_mhz, NULL, 0);
  54. }
/*
 * Yield helper invoked while a CPU busy-waits inside stop_machine().
 * Every spin_retry-th call resets the per-CPU retry counter, picks the
 * next CPU in @cpumask after this one (wrapping around, not revisiting
 * this CPU), and if that CPU's backing vCPU is preempted by the
 * hypervisor, donates the time slice to it via smp_yield_cpu().
 * Marked notrace so it is excluded from function tracing.
 */
void notrace stop_machine_yield(const struct cpumask *cpumask)
{
	int cpu, this_cpu;

	this_cpu = smp_processor_id();
	if (__this_cpu_inc_return(cpu_relax_retry) >= spin_retry) {
		__this_cpu_write(cpu_relax_retry, 0);
		cpu = cpumask_next_wrap(this_cpu, cpumask, this_cpu, false);
		if (cpu >= nr_cpu_ids)
			return;	/* no other CPU in the mask to yield to */
		if (arch_vcpu_is_preempted(cpu))
			smp_yield_cpu(cpu);
	}
}
/*
 * cpu_init - initializes state that is per-CPU.
 *
 * Reads this CPU's id, caches its MHz values when available, and makes
 * the CPU a lazy-TLB user of init_mm: a reference is taken with mmgrab()
 * before init_mm is installed as active_mm.  Expected to run on a kernel
 * thread (current->mm must be NULL).
 */
void cpu_init(void)
{
	struct cpuid *id = this_cpu_ptr(&cpu_info.cpu_id);

	get_cpu_id(id);
	if (machine_has_cpu_mhz)
		update_cpu_mhz(NULL);
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);	/* must not be called with a user mm */
	enter_lazy_tlb(&init_mm, current);
}
  82. static void show_facilities(struct seq_file *m)
  83. {
  84. unsigned int bit;
  85. seq_puts(m, "facilities :");
  86. for_each_set_bit_inv(bit, (long *)&stfle_fac_list, MAX_FACILITY_BIT)
  87. seq_printf(m, " %d", bit);
  88. seq_putc(m, '\n');
  89. }
/*
 * Emit the one-time header of /proc/cpuinfo-style output: vendor,
 * processor count, bogomips, max thread id, hwcap feature names,
 * facility list, cache info, and one id line per online CPU.
 */
static void show_cpu_summary(struct seq_file *m, void *v)
{
	/* Names for the HWCAP_NR_* bits, indexed by bit number. */
	static const char *hwcap_str[] = {
		[HWCAP_NR_ESAN3] = "esan3",
		[HWCAP_NR_ZARCH] = "zarch",
		[HWCAP_NR_STFLE] = "stfle",
		[HWCAP_NR_MSA] = "msa",
		[HWCAP_NR_LDISP] = "ldisp",
		[HWCAP_NR_EIMM] = "eimm",
		[HWCAP_NR_DFP] = "dfp",
		[HWCAP_NR_HPAGE] = "edat",
		[HWCAP_NR_ETF3EH] = "etf3eh",
		[HWCAP_NR_HIGH_GPRS] = "highgprs",
		[HWCAP_NR_TE] = "te",
		[HWCAP_NR_VXRS] = "vx",
		[HWCAP_NR_VXRS_BCD] = "vxd",
		[HWCAP_NR_VXRS_EXT] = "vxe",
		[HWCAP_NR_GS] = "gs",
		[HWCAP_NR_VXRS_EXT2] = "vxe2",
		[HWCAP_NR_VXRS_PDE] = "vxp",
		[HWCAP_NR_SORT] = "sort",
		[HWCAP_NR_DFLT] = "dflt",
		[HWCAP_NR_VXRS_PDE2] = "vxp2",
		[HWCAP_NR_NNPA] = "nnpa",
		[HWCAP_NR_PCI_MIO] = "pcimio",
		[HWCAP_NR_SIE] = "sie",
	};
	int i, cpu;

	/* Keep the name table in sync with the HWCAP_NR_* definitions. */
	BUILD_BUG_ON(ARRAY_SIZE(hwcap_str) != HWCAP_NR_MAX);
	seq_printf(m, "vendor_id : IBM/S390\n"
		   "# processors : %i\n"
		   "bogomips per cpu: %lu.%02lu\n",
		   num_online_cpus(), loops_per_jiffy/(500000/HZ),
		   (loops_per_jiffy/(5000/HZ))%100);
	seq_printf(m, "max thread id : %d\n", smp_cpu_mtid);
	seq_puts(m, "features\t: ");
	/* Print the name of every hwcap bit set in elf_hwcap. */
	for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
		if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
			seq_printf(m, "%s ", hwcap_str[i]);
	seq_puts(m, "\n");
	show_facilities(m);
	show_cacheinfo(m);
	for_each_online_cpu(cpu) {
		struct cpuid *id = &per_cpu(cpu_info.cpu_id, cpu);

		seq_printf(m, "processor %d: "
			   "version = %02X, "
			   "identification = %06X, "
			   "machine = %04X\n",
			   cpu, id->version, id->ident, id->machine);
	}
}
/*
 * setup_hwcaps - derive the AT_HWCAP bits reported to user space from
 * the installed facility bits (test_facility) and machine flags.
 * Runs once at boot as an arch_initcall; always returns 0.
 */
static int __init setup_hwcaps(void)
{
	/* instructions named N3, "backported" to esa-mode */
	elf_hwcap |= HWCAP_ESAN3;
	/* z/Architecture mode active */
	elf_hwcap |= HWCAP_ZARCH;
	/* store-facility-list-extended */
	if (test_facility(7))
		elf_hwcap |= HWCAP_STFLE;
	/* message-security assist */
	if (test_facility(17))
		elf_hwcap |= HWCAP_MSA;
	/* long-displacement */
	if (test_facility(19))
		elf_hwcap |= HWCAP_LDISP;
	/* extended-immediate */
	elf_hwcap |= HWCAP_EIMM;
	/* extended-translation facility 3 enhancement */
	if (test_facility(22) && test_facility(30))
		elf_hwcap |= HWCAP_ETF3EH;
	/* decimal floating point & perform floating point operation */
	if (test_facility(42) && test_facility(44))
		elf_hwcap |= HWCAP_DFP;
	/* huge page support */
	if (MACHINE_HAS_EDAT1)
		elf_hwcap |= HWCAP_HPAGE;
	/* 64-bit register support for 31-bit processes */
	elf_hwcap |= HWCAP_HIGH_GPRS;
	/* transactional execution */
	if (MACHINE_HAS_TE)
		elf_hwcap |= HWCAP_TE;
	/*
	 * Vector extension can be disabled with the "novx" parameter.
	 * Use MACHINE_HAS_VX instead of facility bit 129.
	 */
	if (MACHINE_HAS_VX) {
		elf_hwcap |= HWCAP_VXRS;
		if (test_facility(134))
			elf_hwcap |= HWCAP_VXRS_BCD;
		if (test_facility(135))
			elf_hwcap |= HWCAP_VXRS_EXT;
		if (test_facility(148))
			elf_hwcap |= HWCAP_VXRS_EXT2;
		if (test_facility(152))
			elf_hwcap |= HWCAP_VXRS_PDE;
		if (test_facility(192))
			elf_hwcap |= HWCAP_VXRS_PDE2;
	}
	/* enhanced-sort */
	if (test_facility(150))
		elf_hwcap |= HWCAP_SORT;
	/* deflate-conversion */
	if (test_facility(151))
		elf_hwcap |= HWCAP_DFLT;
	/* neural network processing assist */
	if (test_facility(165))
		elf_hwcap |= HWCAP_NNPA;
	/* guarded storage */
	if (MACHINE_HAS_GS)
		elf_hwcap |= HWCAP_GS;
	if (MACHINE_HAS_PCI_MIO)
		elf_hwcap |= HWCAP_PCI_MIO;
	/* virtualization support */
	if (sclp.has_sief2)
		elf_hwcap |= HWCAP_SIE;
	return 0;
}
arch_initcall(setup_hwcaps);
/*
 * setup_elf_platform - set the AT_PLATFORM string from the machine type
 * in the CPU id, and mix the CPU id into the entropy pool.  Machine
 * types not listed fall back to "z10".  Runs once at boot as an
 * arch_initcall; always returns 0.
 */
static int __init setup_elf_platform(void)
{
	struct cpuid cpu_id;

	get_cpu_id(&cpu_id);
	add_device_randomness(&cpu_id, sizeof(cpu_id));
	switch (cpu_id.machine) {
	default: /* Use "z10" as default. */
		strcpy(elf_platform, "z10");
		break;
	case 0x2817:
	case 0x2818:
		strcpy(elf_platform, "z196");
		break;
	case 0x2827:
	case 0x2828:
		strcpy(elf_platform, "zEC12");
		break;
	case 0x2964:
	case 0x2965:
		strcpy(elf_platform, "z13");
		break;
	case 0x3906:
	case 0x3907:
		strcpy(elf_platform, "z14");
		break;
	case 0x8561:
	case 0x8562:
		strcpy(elf_platform, "z15");
		break;
	case 0x3931:
	case 0x3932:
		strcpy(elf_platform, "z16");
		break;
	}
	return 0;
}
arch_initcall(setup_elf_platform);
/*
 * Emit per-CPU topology lines for CPU @n.  Compiles to an empty stub
 * when CONFIG_SCHED_TOPOLOGY is not set.
 */
static void show_cpu_topology(struct seq_file *m, unsigned long n)
{
#ifdef CONFIG_SCHED_TOPOLOGY
	seq_printf(m, "physical id : %d\n", topology_physical_package_id(n));
	seq_printf(m, "core id : %d\n", topology_core_id(n));
	seq_printf(m, "book id : %d\n", topology_book_id(n));
	seq_printf(m, "drawer id : %d\n", topology_drawer_id(n));
	seq_printf(m, "dedicated : %d\n", topology_cpu_dedicated(n));
	seq_printf(m, "address : %d\n", smp_cpu_get_cpu_address(n));
	seq_printf(m, "siblings : %d\n", cpumask_weight(topology_core_cpumask(n)));
	seq_printf(m, "cpu cores : %d\n", topology_booted_cores(n));
#endif /* CONFIG_SCHED_TOPOLOGY */
}
  256. static void show_cpu_ids(struct seq_file *m, unsigned long n)
  257. {
  258. struct cpuid *id = &per_cpu(cpu_info.cpu_id, n);
  259. seq_printf(m, "version : %02X\n", id->version);
  260. seq_printf(m, "identification : %06X\n", id->ident);
  261. seq_printf(m, "machine : %04X\n", id->machine);
  262. }
  263. static void show_cpu_mhz(struct seq_file *m, unsigned long n)
  264. {
  265. struct cpu_info *c = per_cpu_ptr(&cpu_info, n);
  266. if (!machine_has_cpu_mhz)
  267. return;
  268. seq_printf(m, "cpu MHz dynamic : %d\n", c->cpu_mhz_dynamic);
  269. seq_printf(m, "cpu MHz static : %d\n", c->cpu_mhz_static);
  270. }
  271. /*
  272. * show_cpuinfo - Get information on one CPU for use by procfs.
  273. */
  274. static int show_cpuinfo(struct seq_file *m, void *v)
  275. {
  276. unsigned long n = (unsigned long) v - 1;
  277. unsigned long first = cpumask_first(cpu_online_mask);
  278. if (n == first)
  279. show_cpu_summary(m, v);
  280. seq_printf(m, "\ncpu number : %ld\n", n);
  281. show_cpu_topology(m, n);
  282. show_cpu_ids(m, n);
  283. show_cpu_mhz(m, n);
  284. return 0;
  285. }
/*
 * Advance the seq_file iterator: move *pos to the next online CPU, or
 * to the first online CPU when *pos is 0.  Returns a cookie holding the
 * CPU number biased by +1 so that CPU 0 is distinguishable from the
 * NULL end-of-sequence marker; show_cpuinfo() subtracts the bias.
 */
static inline void *c_update(loff_t *pos)
{
	if (*pos)
		*pos = cpumask_next(*pos - 1, cpu_online_mask);
	else
		*pos = cpumask_first(cpu_online_mask);
	return *pos < nr_cpu_ids ? (void *)*pos + 1 : NULL;
}
/*
 * seq_file .start: take the CPU-hotplug read lock for the whole
 * traversal (released in c_stop) and position on the first entry.
 */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	cpus_read_lock();
	return c_update(pos);
}
/* seq_file .next: step past the CPU just shown, then find the next one. */
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_update(pos);
}
/* seq_file .stop: drop the hotplug lock taken in c_start(). */
static void c_stop(struct seq_file *m, void *v)
{
	cpus_read_unlock();
}
/*
 * seq_file operations iterating the online CPUs under the hotplug read
 * lock; presumably wired up to /proc/cpuinfo by generic proc code.
 */
const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};
  314. int s390_isolate_bp(void)
  315. {
  316. if (!test_facility(82))
  317. return -EOPNOTSUPP;
  318. set_thread_flag(TIF_ISOLATE_BP);
  319. return 0;
  320. }
  321. EXPORT_SYMBOL(s390_isolate_bp);
  322. int s390_isolate_bp_guest(void)
  323. {
  324. if (!test_facility(82))
  325. return -EOPNOTSUPP;
  326. set_thread_flag(TIF_ISOLATE_BP_GUEST);
  327. return 0;
  328. }
  329. EXPORT_SYMBOL(s390_isolate_bp_guest);