Merge branch 'ras/urgent' into ras/core
Pick up urgent fix as pending patch depends on it.
arch/x86/kernel/amd_nb.c

@@ -14,8 +14,11 @@
 #include <asm/amd_nb.h>
 
 #define PCI_DEVICE_ID_AMD_17H_ROOT 0x1450
+#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0
 #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
 #define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464
+#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
+#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
 
 /* Protect the PCI config register pairs used for SMN and DF indirect access. */
 static DEFINE_MUTEX(smn_mutex);
@@ -24,6 +27,7 @@ static u32 *flush_words;
 
 static const struct pci_device_id amd_root_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
 	{}
 };
 
@@ -39,6 +43,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
 	{}
 };
@@ -51,6 +56,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
 	{}
 };
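For illustration only (not part of the patch): entries in these tables are matched by the PCI core, and the devices can also be walked with the stock iterator. A minimal, hypothetical sketch counting the newly supported Family 17h Model 10h (Raven Ridge) root devices:

/* Hypothetical illustration: count the 17h/M10h root complex devices
 * whose ID the patch adds above, via the standard PCI core iterator. */
#include <linux/init.h>
#include <linux/pci.h>

#define DEMO_DEVICE_ID_17H_M10H_ROOT 0x15d0	/* as defined above */

static int __init count_m10h_roots(void)
{
	struct pci_dev *pdev = NULL;
	int n = 0;

	/* pci_get_device() walks all matching devices and drops the
	 * reference on the previous device on each iteration. */
	while ((pdev = pci_get_device(PCI_VENDOR_ID_AMD,
				      DEMO_DEVICE_ID_17H_M10H_ROOT, pdev)))
		n++;

	pr_info("found %d 17h/M10h root devices\n", n);
	return 0;
}
late_initcall(count_m10h_roots);

amd_nb itself does essentially this at boot to populate its cached northbridge list; the sketch only shows the consumer-side view of the new IDs.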
arch/x86/kernel/cpu/common.c

@@ -848,6 +848,11 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
 		c->x86_power = edx;
 	}
 
+	if (c->extended_cpuid_level >= 0x80000008) {
+		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
+		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
+	}
+
 	if (c->extended_cpuid_level >= 0x8000000a)
 		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
 
@@ -871,7 +876,6 @@ static void get_cpu_address_sizes(struct cpuinfo_x86 *c)
 
 		c->x86_virt_bits = (eax >> 8) & 0xff;
 		c->x86_phys_bits = eax & 0xff;
-		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
 	}
 #ifdef CONFIG_X86_32
 	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
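As a side note, the leaf these two hunks shuffle between get_cpu_cap() and get_cpu_address_sizes() is plain CPUID 0x80000008; a small userspace sketch (assuming GCC/Clang on x86) reading the same fields:

/* Userspace sketch: read CPUID leaf 0x80000008 the way the kernel
 * code above does. Assumption: GCC/Clang on an x86 host. */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
		return 1;				/* leaf not supported */

	printf("phys bits: %u\n", eax & 0xff);		/* x86_phys_bits */
	printf("virt bits: %u\n", (eax >> 8) & 0xff);	/* x86_virt_bits */
	printf("EBX caps : %#x\n", ebx);	/* CPUID_8000_0008_EBX word */
	return 0;
}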
arch/x86/kernel/cpu/mcheck/mce_amd.c

@@ -94,6 +94,11 @@ static struct smca_bank_name smca_names[] = {
 	[SMCA_SMU] = { "smu", "System Management Unit" },
 };
 
+static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init =
+{
+	[0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 }
+};
+
 const char *smca_get_name(enum smca_bank_types t)
 {
 	if (t >= N_SMCA_BANK_TYPES)
@@ -443,20 +448,26 @@ static u32 smca_get_block_address(unsigned int cpu, unsigned int bank,
 	if (!block)
 		return MSR_AMD64_SMCA_MCx_MISC(bank);
 
+	/* Check our cache first: */
+	if (smca_bank_addrs[bank][block] != -1)
+		return smca_bank_addrs[bank][block];
+
 	/*
 	 * For SMCA enabled processors, BLKPTR field of the first MISC register
 	 * (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4).
 	 */
 	if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
-		return addr;
+		goto out;
 
 	if (!(low & MCI_CONFIG_MCAX))
-		return addr;
+		goto out;
 
 	if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
 	    (low & MASK_BLKPTR_LO))
-		return MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
+		addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
 
+out:
+	smca_bank_addrs[bank][block] = addr;
 	return addr;
 }
 
@@ -468,18 +479,6 @@ static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 high,
 	if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
 		return addr;
 
-	/* Get address from already initialized block. */
-	if (per_cpu(threshold_banks, cpu)) {
-		struct threshold_bank *bankp = per_cpu(threshold_banks, cpu)[bank];
-
-		if (bankp && bankp->blocks) {
-			struct threshold_block *blockp = &bankp->blocks[block];
-
-			if (blockp)
-				return blockp->address;
-		}
-	}
-
 	if (mce_flags.smca)
 		return smca_get_block_address(cpu, bank, block);
 
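The smca_bank_addrs handling introduced above is a lazily filled lookup table keyed by (bank, block): the first lookup pays for the MSR probe, every later lookup hits the cache. A standalone sketch of the same pattern (hypothetical names, plain C, -1 as the "unresolved" sentinel):

/* Standalone sketch of the caching pattern above (hypothetical names);
 * the real code resolves MSR block addresses via rdmsr_safe_on_cpu(). */
#include <stdint.h>

#define NR_BANKS	32
#define NR_BLOCKS	5

static int64_t addr_cache[NR_BANKS][NR_BLOCKS] = {
	/* Same GNU range-initializer idiom as the kernel code: */
	[0 ... NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 }
};

/* Stand-in for the expensive probe in smca_get_block_address(). */
static int64_t resolve_addr(unsigned int bank, unsigned int block)
{
	return (int64_t)bank * NR_BLOCKS + block;	/* placeholder */
}

int64_t get_addr(unsigned int bank, unsigned int block)
{
	/* Check our cache first: */
	if (addr_cache[bank][block] != -1)
		return addr_cache[bank][block];

	addr_cache[bank][block] = resolve_addr(bank, block);
	return addr_cache[bank][block];
}

The sentinel works only because a legitimately resolved address can never be -1; the same assumption underlies the kernel code, where block addresses are MSR numbers well below 0xffffffff.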
arch/x86/kernel/kvm.c

@@ -457,7 +457,7 @@ static void __init sev_map_percpu_data(void)
 static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
 {
 	native_smp_prepare_cpus(max_cpus);
-	if (kvm_para_has_hint(KVM_HINTS_DEDICATED))
+	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
 		static_branch_disable(&virt_spin_lock_key);
 }
 
@@ -553,7 +553,7 @@ static void __init kvm_guest_init(void)
 	}
 
 	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
-	    !kvm_para_has_hint(KVM_HINTS_DEDICATED) &&
+	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
 	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
 		pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others;
 
@@ -649,7 +649,7 @@ static __init int kvm_setup_pv_tlb_flush(void)
 	int cpu;
 
 	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
-	    !kvm_para_has_hint(KVM_HINTS_DEDICATED) &&
+	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
 	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
 		for_each_possible_cpu(cpu) {
 			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
@@ -745,7 +745,7 @@ void __init kvm_spinlock_init(void)
 	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
 		return;
 
-	if (kvm_para_has_hint(KVM_HINTS_DEDICATED))
+	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
 		return;
 
 	__pv_init_lock_hash();
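The four kvm.c hunks are a pure rename: KVM_HINTS_DEDICATED became KVM_HINTS_REALTIME. For illustration, a guest can inspect the same hint word directly; a sketch assuming the KVM CPUID base sits at 0x40000000 (no Hyper-V offset), the hints live in EDX of leaf 0x40000001, and KVM_HINTS_REALTIME is bit 0:

/* Guest userspace sketch: read the hints word that kvm_para_has_hint()
 * consults in-kernel. Leaf/bit layout per the assumptions above. */
#include <cpuid.h>
#include <stdio.h>

#define DEMO_KVM_CPUID_FEATURES	0x40000001
#define DEMO_KVM_HINTS_REALTIME	(1u << 0)

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Hypervisor leaves (0x4000xxxx) fall outside the ranges that
	 * __get_cpuid() sanity-checks, so use the raw __cpuid() macro. */
	__cpuid(DEMO_KVM_CPUID_FEATURES, eax, ebx, ecx, edx);

	printf("KVM_HINTS_REALTIME: %s\n",
	       (edx & DEMO_KVM_HINTS_REALTIME) ? "set" : "clear");
	return 0;
}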
arch/x86/kernel/tsc.c

@@ -1067,6 +1067,7 @@ static struct clocksource clocksource_tsc_early = {
 	.resume = tsc_resume,
 	.mark_unstable = tsc_cs_mark_unstable,
 	.tick_stable = tsc_cs_tick_stable,
+	.list = LIST_HEAD_INIT(clocksource_tsc_early.list),
 };
 
 /*
@@ -1086,6 +1087,7 @@ static struct clocksource clocksource_tsc = {
 	.resume = tsc_resume,
 	.mark_unstable = tsc_cs_mark_unstable,
 	.tick_stable = tsc_cs_tick_stable,
+	.list = LIST_HEAD_INIT(clocksource_tsc.list),
 };
 
 void mark_tsc_unstable(char *reason)
@@ -1098,13 +1100,9 @@ void mark_tsc_unstable(char *reason)
 	clear_sched_clock_stable();
 	disable_sched_clock_irqtime();
 	pr_info("Marking TSC unstable due to %s\n", reason);
-	/* Change only the rating, when not registered */
-	if (clocksource_tsc.mult) {
-		clocksource_mark_unstable(&clocksource_tsc);
-	} else {
-		clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
-		clocksource_tsc.rating = 0;
-	}
+
+	clocksource_mark_unstable(&clocksource_tsc_early);
+	clocksource_mark_unstable(&clocksource_tsc);
 }
 
 EXPORT_SYMBOL_GPL(mark_tsc_unstable);
@@ -1244,7 +1242,7 @@ static void tsc_refine_calibration_work(struct work_struct *work)
 
 	/* Don't bother refining TSC on unstable systems */
 	if (tsc_unstable)
-		return;
+		goto unreg;
 
 	/*
 	 * Since the work is started early in boot, we may be
@@ -1297,11 +1295,12 @@ static void tsc_refine_calibration_work(struct work_struct *work)
 
 out:
 	if (tsc_unstable)
-		return;
+		goto unreg;
 
 	if (boot_cpu_has(X86_FEATURE_ART))
 		art_related_clocksource = &clocksource_tsc;
 	clocksource_register_khz(&clocksource_tsc, tsc_khz);
+unreg:
+	clocksource_unregister(&clocksource_tsc_early);
 }
 
@@ -1311,8 +1310,8 @@ static int __init init_tsc_clocksource(void)
 	if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_disabled > 0 || !tsc_khz)
 		return 0;
 
-	if (check_tsc_unstable())
-		return 0;
+	if (tsc_unstable)
+		goto unreg;
 
 	if (tsc_clocksource_reliable)
 		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
@@ -1328,6 +1327,7 @@ static int __init init_tsc_clocksource(void)
 	if (boot_cpu_has(X86_FEATURE_ART))
 		art_related_clocksource = &clocksource_tsc;
 	clocksource_register_khz(&clocksource_tsc, tsc_khz);
+unreg:
 	clocksource_unregister(&clocksource_tsc_early);
 	return 0;
 }
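The tsc.c hunks converge on one pattern: register the rough tsc-early clocksource first, then either promote the refined tsc and unregister tsc-early (through the new unreg: labels), or mark both unstable; the pre-initialized .list fields make the unregister/mark-unstable calls safe even before registration. A hypothetical out-of-tree sketch of that register-then-replace flow with the stock clocksource API (all names made up):

/* Hypothetical sketch, not from the patch: the register-then-replace
 * pattern the tsc hunks implement, with made-up clocksources. */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/list.h>

static u64 demo_read(struct clocksource *cs)
{
	return 0;	/* placeholder counter read */
}

static struct clocksource demo_early = {
	.name	= "demo-early",
	.rating	= 299,
	.read	= demo_read,
	.mask	= CLOCKSOURCE_MASK(64),
	/* Pre-initialized .list (as added above for tsc-early/tsc) keeps
	 * clocksource_unregister()/clocksource_mark_unstable() safe even
	 * if this clocksource never gets registered: */
	.list	= LIST_HEAD_INIT(demo_early.list),
};

static struct clocksource demo = {
	.name	= "demo",
	.rating	= 300,
	.read	= demo_read,
	.mask	= CLOCKSOURCE_MASK(64),
	.list	= LIST_HEAD_INIT(demo.list),
};

static int __init demo_clocksource_init(void)
{
	/* Early, rough frequency: */
	clocksource_register_khz(&demo_early, 1000000);

	/* ... later, once a refined frequency is known, promote the
	 * final clocksource and retire the early one: */
	clocksource_register_khz(&demo, 1000000);
	clocksource_unregister(&demo_early);
	return 0;
}
late_initcall(demo_clocksource_init);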