- /* SPDX-License-Identifier: GPL-2.0 */
- #ifndef _ASM_X86_MSR_H
- #define _ASM_X86_MSR_H
- #include "msr-index.h"
- #ifndef __ASSEMBLY__
- #include <asm/asm.h>
- #include <asm/errno.h>
- #include <asm/cpumask.h>
- #include <uapi/asm/msr.h>
- #include <asm/shared/msr.h>
- struct msr_info {
- u32 msr_no;
- struct msr reg;
- struct msr *msrs;
- int err;
- };
- struct msr_regs_info {
- u32 *regs;
- int err;
- };
- struct saved_msr {
- bool valid;
- struct msr_info info;
- };
- struct saved_msrs {
- unsigned int num;
- struct saved_msr *array;
- };
- /*
- * Both i386 and x86_64 return the 64-bit value in edx:eax, but gcc's "A"
- * constraint has different meanings. For i386, "A" means exactly
- * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
- * it means rax *or* rdx.
- */
- #ifdef CONFIG_X86_64
- /* Using 64-bit values saves one instruction clearing the high half of 'low' */
- #define DECLARE_ARGS(val, low, high) unsigned long low, high
- #define EAX_EDX_VAL(val, low, high) ((low) | (high) << 32)
- #define EAX_EDX_RET(val, low, high) "=a" (low), "=d" (high)
- #else
- #define DECLARE_ARGS(val, low, high) unsigned long long val
- #define EAX_EDX_VAL(val, low, high) (val)
- #define EAX_EDX_RET(val, low, high) "=A" (val)
- #endif
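- /*
-  * Hypothetical expansion sketch (illustrative only, compiled out): what a
-  * 64-bit MSR read looks like after the macros above are expanded on x86_64.
-  * The exception-table annotation is omitted for brevity.
-  */
- #if 0
- static __always_inline unsigned long long rdmsr_expanded_example(unsigned int msr)
- {
- 	unsigned long low, high;		/* DECLARE_ARGS(val, low, high) */
- 	asm volatile("rdmsr"
- 		     : "=a" (low), "=d" (high)	/* EAX_EDX_RET() */
- 		     : "c" (msr));
- 	return (low) | (high) << 32;		/* EAX_EDX_VAL() */
- }
- #endif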
- /*
- * Be very careful with includes. This header is prone to include loops.
- */
- #include <asm/atomic.h>
- #include <linux/tracepoint-defs.h>
- #ifdef CONFIG_TRACEPOINTS
- DECLARE_TRACEPOINT(read_msr);
- DECLARE_TRACEPOINT(write_msr);
- DECLARE_TRACEPOINT(rdpmc);
- extern void do_trace_write_msr(unsigned int msr, u64 val, int failed);
- extern void do_trace_read_msr(unsigned int msr, u64 val, int failed);
- extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed);
- #else
- static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {}
- static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {}
- static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
- #endif
- /*
- * __rdmsr() and __wrmsr() are the two primitives which are the bare minimum MSR
- * accessors and should not have any tracing or other functionality piggybacking
- * on them - those are *purely* for accessing MSRs and nothing more. So don't even
- * think of extending them - you will be slapped with a stinking trout or a frozen
- * shark will reach you, wherever you are! You've been warned.
- */
- static __always_inline unsigned long long __rdmsr(unsigned int msr)
- {
- DECLARE_ARGS(val, low, high);
- asm volatile("1: rdmsr\n"
- "2:\n"
- _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_RDMSR)
- : EAX_EDX_RET(val, low, high) : "c" (msr));
- return EAX_EDX_VAL(val, low, high);
- }
- static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high)
- {
- asm volatile("1: wrmsr\n"
- "2:\n"
- _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR)
- : : "c" (msr), "a"(low), "d" (high) : "memory");
- }
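- /*
-  * Usage sketch (illustrative only, compiled out): a read-modify-write with
-  * the bare primitives. MSR_EXAMPLE is a hypothetical index used purely for
-  * illustration; real callers use constants from msr-index.h.
-  */
- #if 0
- #define MSR_EXAMPLE 0x00000123		/* hypothetical MSR index */
- static void msr_example_bare(void)
- {
- 	u64 val = __rdmsr(MSR_EXAMPLE);	/* no tracing, no fault handling */
- 	val |= 1ULL << 0;
- 	__wrmsr(MSR_EXAMPLE, (u32)val, (u32)(val >> 32));
- }
- #endif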
- #define native_rdmsr(msr, val1, val2) \
- do { \
- u64 __val = __rdmsr((msr)); \
- (void)((val1) = (u32)__val); \
- (void)((val2) = (u32)(__val >> 32)); \
- } while (0)
- #define native_wrmsr(msr, low, high) \
- __wrmsr(msr, low, high)
- #define native_wrmsrl(msr, val) \
- __wrmsr((msr), (u32)((u64)(val)), \
- (u32)((u64)(val) >> 32))
- static inline unsigned long long native_read_msr(unsigned int msr)
- {
- unsigned long long val;
- val = __rdmsr(msr);
- if (tracepoint_enabled(read_msr))
- do_trace_read_msr(msr, val, 0);
- return val;
- }
- static inline unsigned long long native_read_msr_safe(unsigned int msr,
- int *err)
- {
- DECLARE_ARGS(val, low, high);
- asm volatile("1: rdmsr ; xor %[err],%[err]\n"
- "2:\n\t"
- _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_RDMSR_SAFE, %[err])
- : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
- : "c" (msr));
- if (tracepoint_enabled(read_msr))
- do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
- return EAX_EDX_VAL(val, low, high);
- }
- /* Can be uninlined because referenced by paravirt */
- static inline void notrace
- native_write_msr(unsigned int msr, u32 low, u32 high)
- {
- __wrmsr(msr, low, high);
- if (tracepoint_enabled(write_msr))
- do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
- }
- /* Can be uninlined because referenced by paravirt */
- static inline int notrace
- native_write_msr_safe(unsigned int msr, u32 low, u32 high)
- {
- int err;
- asm volatile("1: wrmsr ; xor %[err],%[err]\n"
- "2:\n\t"
- _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_WRMSR_SAFE, %[err])
- : [err] "=a" (err)
- : "c" (msr), "0" (low), "d" (high)
- : "memory");
- if (tracepoint_enabled(write_msr))
- do_trace_write_msr(msr, ((u64)high << 32 | low), err);
- return err;
- }
- extern int rdmsr_safe_regs(u32 regs[8]);
- extern int wrmsr_safe_regs(u32 regs[8]);
- /**
- * rdtsc() - returns the current TSC without ordering constraints
- *
- * rdtsc() returns the result of RDTSC as a 64-bit integer. The
- * only ordering constraint it supplies is the ordering implied by
- * "asm volatile": it will put the RDTSC in the place you expect. The
- * CPU can and will speculatively execute that RDTSC, though, so the
- * results can be non-monotonic if compared on different CPUs.
- */
- static __always_inline unsigned long long rdtsc(void)
- {
- DECLARE_ARGS(val, low, high);
- asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));
- return EAX_EDX_VAL(val, low, high);
- }
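- /*
-  * Illustrative sketch (compiled out): a rough cycle-count delta with the
-  * unordered rdtsc(). Because the CPU may execute RDTSC speculatively, the
-  * measured region can leak into or out of the two reads; use this only
-  * where exact ordering does not matter.
-  */
- #if 0
- static u64 example_rough_cycles(void (*fn)(void))
- {
- 	u64 start = rdtsc();
- 	fn();
- 	return rdtsc() - start;
- }
- #endif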
- /**
- * rdtsc_ordered() - read the current TSC in program order
- *
- * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
- * It is ordered like a load to a global in-memory counter. It should
- * be impossible to observe non-monotonic rdtsc_ordered() behavior
- * across multiple CPUs as long as the TSC is synced.
- */
- static __always_inline unsigned long long rdtsc_ordered(void)
- {
- DECLARE_ARGS(val, low, high);
- /*
- * The RDTSC instruction is not ordered relative to memory
- * access. The Intel SDM and the AMD APM are both vague on this
- * point, but empirically an RDTSC instruction can be
- * speculatively executed before prior loads. An RDTSC
- * immediately after an appropriate barrier appears to be
- * ordered as a normal load, that is, it provides the same
- * ordering guarantees as reading from a global memory location
- * that some other imaginary CPU is updating continuously with a
- * time stamp.
- *
- * Thus, use the preferred barrier on the respective CPU, aiming for
- * RDTSCP as the default.
- */
- asm volatile(ALTERNATIVE_2("rdtsc",
- "lfence; rdtsc", X86_FEATURE_LFENCE_RDTSC,
- "rdtscp", X86_FEATURE_RDTSCP)
- : EAX_EDX_RET(val, low, high)
- /* RDTSCP clobbers ECX with MSR_TSC_AUX. */
- :: "ecx");
- return EAX_EDX_VAL(val, low, high);
- }
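- /*
-  * Illustrative sketch (compiled out): the same measurement with
-  * rdtsc_ordered(), whose barrier keeps each read from being reordered
-  * with the surrounding code.
-  */
- #if 0
- static u64 example_ordered_cycles(void (*fn)(void))
- {
- 	u64 start = rdtsc_ordered();
- 	fn();
- 	return rdtsc_ordered() - start;
- }
- #endif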
- static inline unsigned long long native_read_pmc(int counter)
- {
- DECLARE_ARGS(val, low, high);
- asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
- if (tracepoint_enabled(rdpmc))
- do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
- return EAX_EDX_VAL(val, low, high);
- }
- #ifdef CONFIG_PARAVIRT_XXL
- #include <asm/paravirt.h>
- #else
- #include <linux/errno.h>
- /*
- * Access to model-specific registers (available on 586 and better only).
- * Note: the rd* operations modify the parameters directly (without using
- * pointer indirection); this allows gcc to optimize better.
- */
- #define rdmsr(msr, low, high) \
- do { \
- u64 __val = native_read_msr((msr)); \
- (void)((low) = (u32)__val); \
- (void)((high) = (u32)(__val >> 32)); \
- } while (0)
- static inline void wrmsr(unsigned int msr, u32 low, u32 high)
- {
- native_write_msr(msr, low, high);
- }
- #define rdmsrl(msr, val) \
- ((val) = native_read_msr((msr)))
- static inline void wrmsrl(unsigned int msr, u64 val)
- {
- native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
- }
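- /*
-  * Usage sketch (illustrative only, compiled out): the non-safe accessors in
-  * both the split low/high form and the 64-bit form. Note that rdmsr() and
-  * rdmsrl() take lvalues, not pointers. MSR_EXAMPLE is hypothetical.
-  */
- #if 0
- static void msr_example_plain(void)
- {
- 	u32 lo, hi;
- 	u64 val;
- 	rdmsr(MSR_EXAMPLE, lo, hi);
- 	wrmsr(MSR_EXAMPLE, lo, hi);
- 	rdmsrl(MSR_EXAMPLE, val);
- 	wrmsrl(MSR_EXAMPLE, val | (1ULL << 0));
- }
- #endif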
- /* wrmsr with exception handling */
- static inline int wrmsr_safe(unsigned int msr, u32 low, u32 high)
- {
- return native_write_msr_safe(msr, low, high);
- }
- /* rdmsr with exception handling */
- #define rdmsr_safe(msr, low, high) \
- ({ \
- int __err; \
- u64 __val = native_read_msr_safe((msr), &__err); \
- (*low) = (u32)__val; \
- (*high) = (u32)(__val >> 32); \
- __err; \
- })
- static inline int rdmsrl_safe(unsigned int msr, unsigned long long *p)
- {
- int err;
- *p = native_read_msr_safe(msr, &err);
- return err;
- }
- #define rdpmc(counter, low, high) \
- do { \
- u64 _l = native_read_pmc((counter)); \
- (low) = (u32)_l; \
- (high) = (u32)(_l >> 32); \
- } while (0)
- #define rdpmcl(counter, val) ((val) = native_read_pmc(counter))
- #endif /* !CONFIG_PARAVIRT_XXL */
- /*
- * 64-bit version of wrmsr_safe():
- */
- static inline int wrmsrl_safe(u32 msr, u64 val)
- {
- return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
- }
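- /*
-  * Usage sketch (illustrative only, compiled out): the _safe variants trap a
-  * possible #GP through the exception table and return a nonzero error
-  * instead of faulting, so probing code can back off gracefully.
-  */
- #if 0
- static bool msr_example_probe(void)
- {
- 	u64 val;
- 	if (rdmsrl_safe(MSR_EXAMPLE, &val))
- 		return false;	/* faulted: MSR not implemented */
- 	return !wrmsrl_safe(MSR_EXAMPLE, val);
- }
- #endif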
- struct msr *msrs_alloc(void);
- void msrs_free(struct msr *msrs);
- int msr_set_bit(u32 msr, u8 bit);
- int msr_clear_bit(u32 msr, u8 bit);
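- /*
-  * Usage sketch (illustrative only, compiled out): msr_set_bit() and
-  * msr_clear_bit() perform a read-modify-write and return a negative error,
-  * 0 when the bit already had the requested value, or 1 when the MSR was
-  * actually written.
-  */
- #if 0
- static bool msr_example_bit(void)
- {
- 	/* 1 => MSR was written, 0 => bit was already set, < 0 => error */
- 	return msr_set_bit(MSR_EXAMPLE, 2) == 1;
- }
- #endif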
- #ifdef CONFIG_SMP
- int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
- int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
- int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
- int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
- void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
- void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
- int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
- int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
- int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
- int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
- int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
- int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
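- /*
-  * Usage sketch (illustrative only, compiled out): the *_on_cpu() helpers
-  * execute the access on the given CPU (via a cross-CPU function call on
-  * SMP), so per-core MSRs can be reached without migrating the caller.
-  */
- #if 0
- static int msr_example_remote(unsigned int cpu)
- {
- 	u64 val;
- 	int err = rdmsrl_safe_on_cpu(cpu, MSR_EXAMPLE, &val);
- 	if (!err)
- 		err = wrmsrl_safe_on_cpu(cpu, MSR_EXAMPLE, val | (1ULL << 0));
- 	return err;
- }
- #endif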
- #else /* CONFIG_SMP */
- static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
- {
- rdmsr(msr_no, *l, *h);
- return 0;
- }
- static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
- {
- wrmsr(msr_no, l, h);
- return 0;
- }
- static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
- {
- rdmsrl(msr_no, *q);
- return 0;
- }
- static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
- {
- wrmsrl(msr_no, q);
- return 0;
- }
- static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
- struct msr *msrs)
- {
- rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
- }
- static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
- struct msr *msrs)
- {
- wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
- }
- static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
- u32 *l, u32 *h)
- {
- return rdmsr_safe(msr_no, l, h);
- }
- static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
- {
- return wrmsr_safe(msr_no, l, h);
- }
- static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
- {
- return rdmsrl_safe(msr_no, q);
- }
- static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
- {
- return wrmsrl_safe(msr_no, q);
- }
- static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
- {
- return rdmsr_safe_regs(regs);
- }
- static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
- {
- return wrmsr_safe_regs(regs);
- }
- #endif /* CONFIG_SMP */
- #endif /* __ASSEMBLY__ */
- #endif /* _ASM_X86_MSR_H */