/*
 * Helpers to read/write x86 MSRs on remote CPUs via SMP cross-calls.
 */
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/export.h>
  3. #include <linux/preempt.h>
  4. #include <linux/smp.h>
  5. #include <linux/completion.h>
  6. #include <asm/msr.h>
  7. static void __rdmsr_on_cpu(void *info)
  8. {
  9. struct msr_info *rv = info;
  10. struct msr *reg;
  11. int this_cpu = raw_smp_processor_id();
  12. if (rv->msrs)
  13. reg = per_cpu_ptr(rv->msrs, this_cpu);
  14. else
  15. reg = &rv->reg;
  16. rdmsr(rv->msr_no, reg->l, reg->h);
  17. }
  18. static void __wrmsr_on_cpu(void *info)
  19. {
  20. struct msr_info *rv = info;
  21. struct msr *reg;
  22. int this_cpu = raw_smp_processor_id();
  23. if (rv->msrs)
  24. reg = per_cpu_ptr(rv->msrs, this_cpu);
  25. else
  26. reg = &rv->reg;
  27. wrmsr(rv->msr_no, reg->l, reg->h);
  28. }
  29. int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
  30. {
  31. int err;
  32. struct msr_info rv;
  33. memset(&rv, 0, sizeof(rv));
  34. rv.msr_no = msr_no;
  35. err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
  36. *l = rv.reg.l;
  37. *h = rv.reg.h;
  38. return err;
  39. }
  40. EXPORT_SYMBOL(rdmsr_on_cpu);
  41. int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
  42. {
  43. int err;
  44. struct msr_info rv;
  45. memset(&rv, 0, sizeof(rv));
  46. rv.msr_no = msr_no;
  47. err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
  48. *q = rv.reg.q;
  49. return err;
  50. }
  51. EXPORT_SYMBOL(rdmsrl_on_cpu);
  52. int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
  53. {
  54. int err;
  55. struct msr_info rv;
  56. memset(&rv, 0, sizeof(rv));
  57. rv.msr_no = msr_no;
  58. rv.reg.l = l;
  59. rv.reg.h = h;
  60. err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
  61. return err;
  62. }
  63. EXPORT_SYMBOL(wrmsr_on_cpu);
  64. int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
  65. {
  66. int err;
  67. struct msr_info rv;
  68. memset(&rv, 0, sizeof(rv));
  69. rv.msr_no = msr_no;
  70. rv.reg.q = q;
  71. err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
  72. return err;
  73. }
  74. EXPORT_SYMBOL(wrmsrl_on_cpu);
  75. static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
  76. struct msr *msrs,
  77. void (*msr_func) (void *info))
  78. {
  79. struct msr_info rv;
  80. int this_cpu;
  81. memset(&rv, 0, sizeof(rv));
  82. rv.msrs = msrs;
  83. rv.msr_no = msr_no;
  84. this_cpu = get_cpu();
  85. if (cpumask_test_cpu(this_cpu, mask))
  86. msr_func(&rv);
  87. smp_call_function_many(mask, msr_func, &rv, 1);
  88. put_cpu();
  89. }
  90. /* rdmsr on a bunch of CPUs
  91. *
  92. * @mask: which CPUs
  93. * @msr_no: which MSR
  94. * @msrs: array of MSR values
  95. *
  96. */
  97. void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
  98. {
  99. __rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
  100. }
  101. EXPORT_SYMBOL(rdmsr_on_cpus);
  102. /*
  103. * wrmsr on a bunch of CPUs
  104. *
  105. * @mask: which CPUs
  106. * @msr_no: which MSR
  107. * @msrs: array of MSR values
  108. *
  109. */
  110. void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
  111. {
  112. __rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
  113. }
  114. EXPORT_SYMBOL(wrmsr_on_cpus);
/*
 * Payload for the async "safe" read path: the MSR request/result plus a
 * completion the IPI handler signals when the on-stack data may be reclaimed.
 */
struct msr_info_completion {
	struct msr_info msr;	/* request in, result + err out */
	struct completion done;	/* signalled by __rdmsr_safe_on_cpu() */
};
/* These "safe" variants are slower and should be used when the target MSR
   may not actually exist. */
static void __rdmsr_safe_on_cpu(void *info)
{
	struct msr_info_completion *rv = info;

	/* rdmsr_safe() returns an error instead of faulting on a bad MSR. */
	rv->msr.err = rdmsr_safe(rv->msr.msr_no, &rv->msr.reg.l, &rv->msr.reg.h);
	/*
	 * Signal only after the result is stored: the caller's rv lives on
	 * its stack and is reclaimed once wait_for_completion() returns.
	 */
	complete(&rv->done);
}
/* IPI callback: fault-tolerant MSR write; error is reported via rv->err. */
static void __wrmsr_safe_on_cpu(void *info)
{
	struct msr_info *rv = info;

	rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
}
/**
 * rdmsr_safe_on_cpu - read a possibly non-existent MSR on a given CPU
 * @cpu: the CPU to read on
 * @msr_no: which MSR to read
 * @l: low 32 bits of the result
 * @h: high 32 bits of the result
 *
 * Uses an async cross-call plus a completion rather than the synchronous
 * smp_call_function_single() path.  Both @csd and @rv live on this stack;
 * the handler's complete() is what makes it safe to return and reclaim them.
 *
 * Returns the cross-call error, or the rdmsr_safe() error from the target
 * CPU if the call itself succeeded.
 */
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	struct msr_info_completion rv;
	call_single_data_t csd;
	int err;

	INIT_CSD(&csd, __rdmsr_safe_on_cpu, &rv);

	memset(&rv, 0, sizeof(rv));
	init_completion(&rv.done);
	rv.msr.msr_no = msr_no;

	err = smp_call_function_single_async(cpu, &csd);
	if (!err) {
		/* Only wait if the IPI was actually queued. */
		wait_for_completion(&rv.done);
		err = rv.msr.err;
	}
	/* On cross-call failure these are the zeroes from the memset. */
	*l = rv.msr.reg.l;
	*h = rv.msr.reg.h;

	return err;
}
EXPORT_SYMBOL(rdmsr_safe_on_cpu);
  151. int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
  152. {
  153. int err;
  154. struct msr_info rv;
  155. memset(&rv, 0, sizeof(rv));
  156. rv.msr_no = msr_no;
  157. rv.reg.l = l;
  158. rv.reg.h = h;
  159. err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
  160. return err ? err : rv.err;
  161. }
  162. EXPORT_SYMBOL(wrmsr_safe_on_cpu);
  163. int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
  164. {
  165. int err;
  166. struct msr_info rv;
  167. memset(&rv, 0, sizeof(rv));
  168. rv.msr_no = msr_no;
  169. rv.reg.q = q;
  170. err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
  171. return err ? err : rv.err;
  172. }
  173. EXPORT_SYMBOL(wrmsrl_safe_on_cpu);
  174. int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
  175. {
  176. u32 low, high;
  177. int err;
  178. err = rdmsr_safe_on_cpu(cpu, msr_no, &low, &high);
  179. *q = (u64)high << 32 | low;
  180. return err;
  181. }
  182. EXPORT_SYMBOL(rdmsrl_safe_on_cpu);
  183. /*
  184. * These variants are significantly slower, but allows control over
  185. * the entire 32-bit GPR set.
  186. */
  187. static void __rdmsr_safe_regs_on_cpu(void *info)
  188. {
  189. struct msr_regs_info *rv = info;
  190. rv->err = rdmsr_safe_regs(rv->regs);
  191. }
  192. static void __wrmsr_safe_regs_on_cpu(void *info)
  193. {
  194. struct msr_regs_info *rv = info;
  195. rv->err = wrmsr_safe_regs(rv->regs);
  196. }
  197. int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
  198. {
  199. int err;
  200. struct msr_regs_info rv;
  201. rv.regs = regs;
  202. rv.err = -EIO;
  203. err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
  204. return err ? err : rv.err;
  205. }
  206. EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
  207. int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
  208. {
  209. int err;
  210. struct msr_regs_info rv;
  211. rv.regs = regs;
  212. rv.err = -EIO;
  213. err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
  214. return err ? err : rv.err;
  215. }
  216. EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);