Merge branch 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 cpu updates from Ingo Molnar:
 "The main changes in this cycle were:

   - Add support for the "Dhyana" x86 CPUs by Hygon: these are licensed
     based on the AMD Zen architecture, and are built and sold in China,
     for domestic datacenter use. The code is pretty close to AMD
     support, mostly with a few quirks and enumeration differences. (Pu
     Wen)

   - Enable CPUID support on Cyrix 6x86/6x86L processors"

* 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  tools/cpupower: Add Hygon Dhyana support
  cpufreq: Add Hygon Dhyana support
  ACPI: Add Hygon Dhyana support
  x86/xen: Add Hygon Dhyana support to Xen
  x86/kvm: Add Hygon Dhyana support to KVM
  x86/mce: Add Hygon Dhyana support to the MCA infrastructure
  x86/bugs: Add Hygon Dhyana to the respective mitigation machinery
  x86/apic: Add Hygon Dhyana support
  x86/pci, x86/amd_nb: Add Hygon Dhyana support to PCI and northbridge
  x86/amd_nb: Check vendor in AMD-only functions
  x86/alternative: Init ideal_nops for Hygon Dhyana
  x86/events: Add Hygon Dhyana support to PMU infrastructure
  x86/smpboot: Do not use BSP INIT delay and MWAIT to idle on Dhyana
  x86/cpu/mtrr: Support TOP_MEM2 and get MTRR number
  x86/cpu: Get cache info and setup cache cpumap for Hygon Dhyana
  x86/cpu: Create Hygon Dhyana architecture support file
  x86/CPU: Change query logic so CPUID is enabled before testing
  x86/CPU: Use correct macros for Cyrix calls
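As the summary notes, the Dhyana enablement largely rides on the existing AMD code paths, keyed off the CPU vendor; the xen_pmu_arch_init() hunk below is a typical example, programming the Hygon case with the same F10H counter count and K7 MSR layout as AMD. As a side note, here is a minimal user-space sketch of how the two vendors are told apart via the CPUID leaf-0 vendor string ("HygonGenuine" vs. "AuthenticAMD"); it is illustrative only and not code from these patches:

#include <cpuid.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;
        char vendor[13];

        /* CPUID leaf 0 returns the vendor string in EBX, EDX, ECX. */
        if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
                return 1;

        memcpy(vendor + 0, &ebx, 4);
        memcpy(vendor + 4, &edx, 4);
        memcpy(vendor + 8, &ecx, 4);
        vendor[12] = '\0';

        if (!strcmp(vendor, "HygonGenuine"))
                puts("Hygon Dhyana: takes the AMD-style code paths");
        else if (!strcmp(vendor, "AuthenticAMD"))
                puts("AMD");
        else
                printf("other vendor: %s\n", vendor);
        return 0;
}

Inside the kernel the same decision is surfaced as boot_cpu_data.x86_vendor == X86_VENDOR_HYGON, which is the check the first hunk below adds.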
@@ -91,6 +91,12 @@ static void xen_pmu_arch_init(void)
                         k7_counters_mirrored = 0;
                         break;
                 }
+        } else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+                amd_num_counters = F10H_NUM_COUNTERS;
+                amd_counters_base = MSR_K7_PERFCTR0;
+                amd_ctrls_base = MSR_K7_EVNTSEL0;
+                amd_msr_step = 1;
+                k7_counters_mirrored = 0;
         } else {
                 uint32_t eax, ebx, ecx, edx;
 
@@ -286,7 +292,7 @@ static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read)
 
 bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err)
 {
-        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
                 if (is_amd_pmu_msr(msr)) {
                         if (!xen_amd_pmu_emulate(msr, val, 1))
                                 *val = native_read_msr_safe(msr, err);
@@ -309,7 +315,7 @@ bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err)
 {
         uint64_t val = ((uint64_t)high << 32) | low;
 
-        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
                 if (is_amd_pmu_msr(msr)) {
                         if (!xen_amd_pmu_emulate(msr, &val, 0))
                                 *err = native_write_msr_safe(msr, low, high);
@@ -380,7 +386,7 @@ static unsigned long long xen_intel_read_pmc(int counter)
 
 unsigned long long xen_read_pmc(int counter)
 {
-        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
                 return xen_amd_read_pmc(counter);
         else
                 return xen_intel_read_pmc(counter);
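The remaining three hunks, in pmu_msr_read(), pmu_msr_write() and xen_read_pmc(), all make the same relaxation: the AMD-specific path is now taken for any non-Intel vendor rather than for AMD alone, which is what lets Dhyana reuse the existing AMD PMU emulation (is_amd_pmu_msr() still decides which MSRs are actually emulated). A standalone sketch of that dispatch shape, using illustrative stand-in names rather than the kernel's:

#include <stdio.h>

/* Illustrative stand-ins for the kernel's X86_VENDOR_* ids. */
enum x86_vendor { VENDOR_INTEL, VENDOR_AMD, VENDOR_HYGON };

static const char *read_pmc_path(enum x86_vendor vendor)
{
        /*
         * Mirrors the change in xen_read_pmc(): before the patch the
         * condition was "vendor == AMD"; afterwards any non-Intel
         * vendor (AMD and Hygon alike) takes the AMD counter path.
         */
        if (vendor != VENDOR_INTEL)
                return "xen_amd_read_pmc";
        else
                return "xen_intel_read_pmc";
}

int main(void)
{
        printf("Hygon -> %s\n", read_pmc_path(VENDOR_HYGON));
        printf("Intel -> %s\n", read_pmc_path(VENDOR_INTEL));
        return 0;
}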