cppc.c 2.6 KB

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * cppc.c: CPPC Interface for x86
  4. * Copyright (c) 2016, Intel Corporation.
  5. */
  6. #include <acpi/cppc_acpi.h>
  7. #include <asm/msr.h>
  8. #include <asm/processor.h>
  9. #include <asm/topology.h>
  10. /* Refer to drivers/acpi/cppc_acpi.c for the description of functions */
  11. bool cpc_supported_by_cpu(void)
  12. {
  13. switch (boot_cpu_data.x86_vendor) {
  14. case X86_VENDOR_AMD:
  15. case X86_VENDOR_HYGON:
  16. if (boot_cpu_data.x86 == 0x19 && ((boot_cpu_data.x86_model <= 0x0f) ||
  17. (boot_cpu_data.x86_model >= 0x20 && boot_cpu_data.x86_model <= 0x2f)))
  18. return true;
  19. else if (boot_cpu_data.x86 == 0x17 &&
  20. boot_cpu_data.x86_model >= 0x70 && boot_cpu_data.x86_model <= 0x7f)
  21. return true;
  22. return boot_cpu_has(X86_FEATURE_CPPC);
  23. }
  24. return false;
  25. }
  26. bool cpc_ffh_supported(void)
  27. {
  28. return true;
  29. }
  30. int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
  31. {
  32. int err;
  33. err = rdmsrl_safe_on_cpu(cpunum, reg->address, val);
  34. if (!err) {
  35. u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1,
  36. reg->bit_offset);
  37. *val &= mask;
  38. *val >>= reg->bit_offset;
  39. }
  40. return err;
  41. }
  42. int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
  43. {
  44. u64 rd_val;
  45. int err;
  46. err = rdmsrl_safe_on_cpu(cpunum, reg->address, &rd_val);
  47. if (!err) {
  48. u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1,
  49. reg->bit_offset);
  50. val <<= reg->bit_offset;
  51. val &= mask;
  52. rd_val &= ~mask;
  53. rd_val |= val;
  54. err = wrmsrl_safe_on_cpu(cpunum, reg->address, rd_val);
  55. }
  56. return err;
  57. }
  58. static void amd_set_max_freq_ratio(void)
  59. {
  60. struct cppc_perf_caps perf_caps;
  61. u64 highest_perf, nominal_perf;
  62. u64 perf_ratio;
  63. int rc;
  64. rc = cppc_get_perf_caps(0, &perf_caps);
  65. if (rc) {
  66. pr_debug("Could not retrieve perf counters (%d)\n", rc);
  67. return;
  68. }
  69. highest_perf = amd_get_highest_perf();
  70. nominal_perf = perf_caps.nominal_perf;
  71. if (!highest_perf || !nominal_perf) {
  72. pr_debug("Could not retrieve highest or nominal performance\n");
  73. return;
  74. }
  75. perf_ratio = div_u64(highest_perf * SCHED_CAPACITY_SCALE, nominal_perf);
  76. /* midpoint between max_boost and max_P */
  77. perf_ratio = (perf_ratio + SCHED_CAPACITY_SCALE) >> 1;
  78. if (!perf_ratio) {
  79. pr_debug("Non-zero highest/nominal perf values led to a 0 ratio\n");
  80. return;
  81. }
  82. freq_invariance_set_perf_ratio(perf_ratio, false);
  83. }
  84. static DEFINE_MUTEX(freq_invariance_lock);
  85. void init_freq_invariance_cppc(void)
  86. {
  87. static bool init_done;
  88. if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF))
  89. return;
  90. if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
  91. return;
  92. mutex_lock(&freq_invariance_lock);
  93. if (!init_done)
  94. amd_set_max_freq_ratio();
  95. init_done = true;
  96. mutex_unlock(&freq_invariance_lock);
  97. }