Merge branch 'l1tf-final' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Merge L1 Terminal Fault fixes from Thomas Gleixner:
 "L1TF, aka L1 Terminal Fault, is yet another speculative hardware
  engineering trainwreck. It's a hardware vulnerability which allows
  unprivileged speculative access to data which is available in the
  Level 1 Data Cache when the page table entry controlling the virtual
  address used for the access has the Present bit cleared or other
  reserved bits set.

  If an instruction accesses a virtual address for which the relevant
  page table entry (PTE) has the Present bit cleared or other reserved
  bits set, then speculative execution ignores the invalid PTE and loads
  the referenced data if it is present in the Level 1 Data Cache, as if
  the page referenced by the address bits in the PTE was still present
  and accessible.

  While this is a purely speculative mechanism and the instruction will
  raise a page fault when it is retired eventually, the pure act of
  loading the data and making it available to other speculative
  instructions opens up the opportunity for side channel attacks to
  unprivileged malicious code, similar to the Meltdown attack.

  While Meltdown breaks the user space to kernel space protection, L1TF
  allows attacking any physical memory address in the system, and the
  attack works across all protection domains. It allows attacks on SGX
  and also works from inside virtual machines because the speculation
  bypasses the extended page table (EPT) protection mechanism.

  The associated CVEs are: CVE-2018-3615, CVE-2018-3620, CVE-2018-3646

  The mitigations provided by this pull request include:

   - Host side protection by inverting the upper address bits of a
     non-present page table entry so the entry points to uncacheable
     memory (a rough sketch of the idea follows this list).

   - Hypervisor protection by flushing L1 Data Cache on VMENTER.

   - SMT (HyperThreading) control knobs, which allow 'turning off' SMT
     by offlining the sibling CPU threads. The knobs are available on
     the kernel command line and at runtime via sysfs.

   - Control knobs for the hypervisor mitigation, related to L1D flush
     and SMT control. The knobs are available on the kernel command line
     and at runtime via sysfs.

   - Extensive documentation about L1TF including various degrees of
     mitigations.
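
  As a rough illustration of the PTE inversion idea (a minimal sketch
  with made-up names, not the actual kernel helpers): when an entry is
  made non-present, its physical address bits are stored inverted, so
  a speculative dereference of the stale entry resolves to a high,
  unpopulated and uncacheable physical address instead of real data:

      #include <stdint.h>

      #define PTE_PRESENT    0x1ULL
      #define PHYS_PFN_MASK  0x000ffffffffff000ULL  /* addr bits 12..51 */

      /* Sketch only: mask and helper name are illustrative. */
      static uint64_t mk_pte_nonpresent(uint64_t pte)
      {
              uint64_t v = pte & ~PTE_PRESENT;   /* clear Present   */
              return v ^ PHYS_PFN_MASK;          /* invert PFN bits */
      }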

  Thanks to all the people who have contributed to this in various ways
  - patches, review, testing, backporting - and the fruitful, sometimes
  heated, but in the end constructive discussions.

  There is work in progress to provide other forms of mitigation, which
  might be less horrible performance-wise for particular kinds of
  workloads, but it is not yet ready for consumption due to its
  complexity and limitations"

* 'l1tf-final' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (75 commits)
  x86/microcode: Allow late microcode loading with SMT disabled
  tools headers: Synchronise x86 cpufeatures.h for L1TF additions
  x86/mm/kmmio: Make the tracer robust against L1TF
  x86/mm/pat: Make set_memory_np() L1TF safe
  x86/speculation/l1tf: Make pmd/pud_mknotpresent() invert
  x86/speculation/l1tf: Invert all not present mappings
  cpu/hotplug: Fix SMT supported evaluation
  KVM: VMX: Tell the nested hypervisor to skip L1D flush on vmentry
  x86/speculation: Use ARCH_CAPABILITIES to skip L1D flush on vmentry
  x86/speculation: Simplify sysfs report of VMX L1TF vulnerability
  Documentation/l1tf: Remove Yonah processors from not vulnerable list
  x86/KVM/VMX: Don't set l1tf_flush_l1d from vmx_handle_external_intr()
  x86/irq: Let interrupt handlers set kvm_cpu_l1tf_flush_l1d
  x86: Don't include linux/irq.h from asm/hardirq.h
  x86/KVM/VMX: Introduce per-host-cpu analogue of l1tf_flush_l1d
  x86/irq: Demote irq_cpustat_t::__softirq_pending to u16
  x86/KVM/VMX: Move the l1tf_flush_l1d test to vmx_l1d_flush()
  x86/KVM/VMX: Replace 'vmx_l1d_flush_always' with 'vmx_l1d_flush_cond'
  x86/KVM/VMX: Don't set l1tf_flush_l1d to true from vmx_l1d_flush()
  cpu/hotplug: detect SMT disabled by BIOS
  ...
This commit is contained in:
Linus Torvalds
2018-08-14 09:46:06 -07:00
parents 781fca5b10 07d981ad4c
commit 958f338e96
71 files changed, 2186 additions and 260 deletions

View file

@@ -56,6 +56,7 @@
#include <asm/hypervisor.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/irq_regs.h>
unsigned int num_processors;
@@ -2192,6 +2193,21 @@ static int cpuid_to_apicid[] = {
        [0 ... NR_CPUS - 1] = -1,
};

/**
 * apic_id_is_primary_thread - Check whether APIC ID belongs to a primary thread
 * @id: APIC ID to check
 */
bool apic_id_is_primary_thread(unsigned int apicid)
{
        u32 mask;

        if (smp_num_siblings == 1)
                return true;
        /* Isolate the SMT bit(s) in the APICID and check for 0 */
        mask = (1U << (fls(smp_num_siblings) - 1)) - 1;
        return !(apicid & mask);
}
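
/*
 * Worked example: with smp_num_siblings == 2, fls(2) == 2, so mask is
 * 0x1 and the even APIC IDs (0, 2, 4, ...) are the primary threads;
 * with smp_num_siblings == 4, mask is 0x3 and the primary threads are
 * the APIC IDs that are multiples of 4.
 */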

/*
 * Should use this API to allocate logical CPU IDs to keep nr_logical_cpuids
 * and cpuid_to_apicid[] synchronized.

View file

@@ -33,6 +33,7 @@
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>

View file

@@ -12,6 +12,7 @@
*/
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/hpet.h>

View file

@@ -11,6 +11,7 @@
* published by the Free Software Foundation.
*/
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compiler.h>

View file

@@ -313,6 +313,13 @@ static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
        c->cpu_core_id %= cus_per_node;
}

static void amd_get_topology_early(struct cpuinfo_x86 *c)
{
        if (cpu_has(c, X86_FEATURE_TOPOEXT))
                smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
}

/*
 * Fixup core topology information for
 * (1) AMD multi-node processors
@@ -332,7 +339,6 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
                cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

                node_id = ecx & 0xff;
                smp_num_siblings = ((ebx >> 8) & 0xff) + 1;

                if (c->x86 == 0x15)
                        c->cu_id = ebx & 0xff;
@@ -611,6 +617,7 @@ clear_sev:
static void early_init_amd(struct cpuinfo_x86 *c)
{
        u64 value;
        u32 dummy;

        early_init_amd_mc(c);
@@ -689,6 +696,22 @@ static void early_init_amd(struct cpuinfo_x86 *c)
                set_cpu_bug(c, X86_BUG_AMD_E400);

        early_detect_mem_encrypt(c);

        /* Re-enable TopologyExtensions if switched off by BIOS */
        if (c->x86 == 0x15 &&
            (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
            !cpu_has(c, X86_FEATURE_TOPOEXT)) {

                if (msr_set_bit(0xc0011005, 54) > 0) {
                        rdmsrl(0xc0011005, value);
                        if (value & BIT_64(54)) {
                                set_cpu_cap(c, X86_FEATURE_TOPOEXT);
                                pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
                        }
                }
        }

        amd_get_topology_early(c);
}

static void init_amd_k8(struct cpuinfo_x86 *c)
@@ -780,19 +803,6 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
{
        u64 value;

        /* re-enable TopologyExtensions if switched off by BIOS */
        if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) &&
            !cpu_has(c, X86_FEATURE_TOPOEXT)) {

                if (msr_set_bit(0xc0011005, 54) > 0) {
                        rdmsrl(0xc0011005, value);
                        if (value & BIT_64(54)) {
                                set_cpu_cap(c, X86_FEATURE_TOPOEXT);
                                pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
                        }
                }
        }

        /*
         * The way access filter has a performance penalty on some workloads.
         * Disable it on the affected CPUs.
@@ -856,16 +866,9 @@ static void init_amd(struct cpuinfo_x86 *c)
        cpu_detect_cache_sizes(c);

        /* Multi core CPU? */
        if (c->extended_cpuid_level >= 0x80000008) {
                amd_detect_cmp(c);
                amd_get_topology(c);
                srat_detect_node(c);
        }

#ifdef CONFIG_X86_32
        detect_ht(c);
#endif
        amd_detect_cmp(c);
        amd_get_topology(c);
        srat_detect_node(c);

        init_amd_cacheinfo(c);

View file

@@ -22,15 +22,18 @@
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>
static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
/*
 * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
@@ -56,6 +59,12 @@ void __init check_bugs(void)
{
        identify_boot_cpu();

        /*
         * identify_boot_cpu() initialized SMT support information, let the
         * core code know.
         */
        cpu_smt_check_topology_early();

        if (!IS_ENABLED(CONFIG_SMP)) {
                pr_info("CPU: ");
                print_cpu_info(&boot_cpu_data);
@@ -82,6 +91,8 @@ void __init check_bugs(void)
         */
        ssb_select_mitigation();

        l1tf_select_mitigation();

#ifdef CONFIG_X86_32
        /*
         * Check whether we are able to run this kernel safely on SMP.
@@ -646,8 +657,121 @@ void x86_spec_ctrl_setup_ap(void)
                x86_amd_ssb_disable();
}

#undef pr_fmt
#define pr_fmt(fmt) "L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);

enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
#endif

static void __init l1tf_select_mitigation(void)
{
        u64 half_pa;

        if (!boot_cpu_has_bug(X86_BUG_L1TF))
                return;

        switch (l1tf_mitigation) {
        case L1TF_MITIGATION_OFF:
        case L1TF_MITIGATION_FLUSH_NOWARN:
        case L1TF_MITIGATION_FLUSH:
                break;
        case L1TF_MITIGATION_FLUSH_NOSMT:
        case L1TF_MITIGATION_FULL:
                cpu_smt_disable(false);
                break;
        case L1TF_MITIGATION_FULL_FORCE:
                cpu_smt_disable(true);
                break;
        }

#if CONFIG_PGTABLE_LEVELS == 2
        pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
        return;
#endif

        /*
         * This is extremely unlikely to happen because almost all
         * systems have far more MAX_PA/2 than RAM can be fit into
         * DIMM slots.
         */
        half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
        if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
                pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
                return;
        }

        setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}
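
/*
 * Worked example: with 46 bits of physical address space, MAX_PA/2 is
 * 2^45 bytes (32 TiB), so only machines with RAM mapped at or above
 * that boundary lose the PTE inversion guarantee.
 */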

static int __init l1tf_cmdline(char *str)
{
        if (!boot_cpu_has_bug(X86_BUG_L1TF))
                return 0;

        if (!str)
                return -EINVAL;

        if (!strcmp(str, "off"))
                l1tf_mitigation = L1TF_MITIGATION_OFF;
        else if (!strcmp(str, "flush,nowarn"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
        else if (!strcmp(str, "flush"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH;
        else if (!strcmp(str, "flush,nosmt"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
        else if (!strcmp(str, "full"))
                l1tf_mitigation = L1TF_MITIGATION_FULL;
        else if (!strcmp(str, "full,force"))
                l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

        return 0;
}
early_param("l1tf", l1tf_cmdline);
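
/*
 * Usage: the mode is chosen with the "l1tf=" kernel command line
 * option, e.g. "l1tf=full,force" force-disables SMT and enables the
 * full set of mitigations unconditionally, while "l1tf=flush" (the
 * default) keeps SMT enabled and relies on conditional L1D flushing.
 */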

#undef pr_fmt

#ifdef CONFIG_SYSFS

#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"

#if IS_ENABLED(CONFIG_KVM_INTEL)
static const char *l1tf_vmx_states[] = {
        [VMENTER_L1D_FLUSH_AUTO]         = "auto",
        [VMENTER_L1D_FLUSH_NEVER]        = "vulnerable",
        [VMENTER_L1D_FLUSH_COND]         = "conditional cache flushes",
        [VMENTER_L1D_FLUSH_ALWAYS]       = "cache flushes",
        [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled",
        [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary"
};

static ssize_t l1tf_show_state(char *buf)
{
        if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
                return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);

        if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
            (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
             cpu_smt_control == CPU_SMT_ENABLED))
                return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
                               l1tf_vmx_states[l1tf_vmx_mitigation]);

        return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
                       l1tf_vmx_states[l1tf_vmx_mitigation],
                       cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled");
}
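
/*
 * Example output (illustrative) read from
 * /sys/devices/system/cpu/vulnerabilities/l1tf:
 *
 *   Mitigation: PTE Inversion; VMX: conditional cache flushes, SMT vulnerable
 */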
#else
static ssize_t l1tf_show_state(char *buf)
{
        return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
}
#endif

static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
                               char *buf, unsigned int bug)
{
@@ -676,6 +800,10 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
        case X86_BUG_SPEC_STORE_BYPASS:
                return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

        case X86_BUG_L1TF:
                if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
                        return l1tf_show_state(buf);
                break;

        default:
                break;
        }
@@ -702,4 +830,9 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}
#endif

View file

@@ -661,33 +661,36 @@ static void cpu_detect_tlb(struct cpuinfo_x86 *c)
                tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
}

void detect_ht(struct cpuinfo_x86 *c)
int detect_ht_early(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        u32 eax, ebx, ecx, edx;
        int index_msb, core_bits;
        static bool printed;

        if (!cpu_has(c, X86_FEATURE_HT))
                return;
                return -1;

        if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
                goto out;
                return -1;

        if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
                return;
                return -1;

        cpuid(1, &eax, &ebx, &ecx, &edx);

        smp_num_siblings = (ebx & 0xff0000) >> 16;
        if (smp_num_siblings == 1) {
        if (smp_num_siblings == 1)
                pr_info_once("CPU0: Hyper-Threading is disabled\n");
                goto out;
        }
#endif
        return 0;
}

        if (smp_num_siblings <= 1)
                goto out;

void detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        int index_msb, core_bits;

        if (detect_ht_early(c) < 0)
                return;

        index_msb = get_count_order(smp_num_siblings);
        c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
@@ -700,15 +703,6 @@ void detect_ht(struct cpuinfo_x86 *c)
        c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
                                ((1 << core_bits) - 1);

out:
        if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
                pr_info("CPU: Physical Processor ID: %d\n",
                        c->phys_proc_id);
                pr_info("CPU: Processor Core ID: %d\n",
                        c->cpu_core_id);
                printed = 1;
        }
#endif
}

@@ -987,6 +981,21 @@ static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
        {}
};

static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
        /* in addition to cpu_no_speculation */
        { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
        { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
        { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
        { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
        { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MOOREFIELD },
        { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT },
        { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON },
        { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GEMINI_LAKE },
        { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
        { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
        {}
};

static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
        u64 ia32_cap = 0;
@@ -1016,6 +1025,11 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
                return;

        setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);

        if (x86_match_cpu(cpu_no_l1tf))
                return;

        setup_force_cpu_bug(X86_BUG_L1TF);
}
/*

View file

@@ -55,7 +55,9 @@ extern void init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);
extern void detect_num_cpu_cores(struct cpuinfo_x86 *c);
extern int detect_extended_topology_early(struct cpuinfo_x86 *c);
extern int detect_extended_topology(struct cpuinfo_x86 *c);
extern int detect_ht_early(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);
unsigned int aperfmperf_get_khz(int cpu);

View file

@@ -301,6 +301,13 @@ static void early_init_intel(struct cpuinfo_x86 *c)
        }

        check_mpx_erratum(c);

        /*
         * Get the number of SMT siblings early from the extended topology
         * leaf, if available. Otherwise try the legacy SMT detection.
         */
        if (detect_extended_topology_early(c) < 0)
                detect_ht_early(c);
}
#ifdef CONFIG_X86_32

View file

@@ -509,12 +509,20 @@ static struct platform_device *microcode_pdev;
static int check_online_cpus(void)
{
        if (num_online_cpus() == num_present_cpus())
                return 0;
        unsigned int cpu;

        pr_err("Not all CPUs online, aborting microcode update.\n");
        /*
         * Make sure all CPUs are online.  It's fine for SMT to be disabled if
         * all the primary threads are still online.
         */
        for_each_present_cpu(cpu) {
                if (topology_is_primary_thread(cpu) && !cpu_online(cpu)) {
                        pr_err("Not all CPUs online, aborting microcode update.\n");
                        return -EINVAL;
                }
        }

        return -EINVAL;
        return 0;
}
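
/*
 * With this check, a late microcode load, e.g. via
 * "echo 1 > /sys/devices/system/cpu/microcode/reload" (assuming the
 * usual sysfs reload interface), is no longer rejected merely because
 * SMT siblings are offline, as long as every primary thread is online.
 */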
static atomic_t late_cpus_in;

View file

@@ -22,18 +22,10 @@
#define BITS_SHIFT_NEXT_LEVEL(eax) ((eax) & 0x1f)
#define LEVEL_MAX_SIBLINGS(ebx) ((ebx) & 0xffff)
/*
 * Check for extended topology enumeration cpuid leaf 0xb and if it
 * exists, use it for populating initial_apicid and cpu topology
 * detection.
 */
int detect_extended_topology(struct cpuinfo_x86 *c)
int detect_extended_topology_early(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        unsigned int eax, ebx, ecx, edx, sub_index;
        unsigned int ht_mask_width, core_plus_mask_width;
        unsigned int core_select_mask, core_level_siblings;
        static bool printed;
        unsigned int eax, ebx, ecx, edx;

        if (c->cpuid_level < 0xb)
                return -1;
@@ -52,10 +44,30 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
         * initial apic id, which also represents 32-bit extended x2apic id.
         */
        c->initial_apicid = edx;
        smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
#endif
        return 0;
}

/*
 * Check for extended topology enumeration cpuid leaf 0xb and if it
 * exists, use it for populating initial_apicid and cpu topology
 * detection.
 */
int detect_extended_topology(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        unsigned int eax, ebx, ecx, edx, sub_index;
        unsigned int ht_mask_width, core_plus_mask_width;
        unsigned int core_select_mask, core_level_siblings;

        if (detect_extended_topology_early(c) < 0)
                return -1;

        /*
         * Populate HT related information from sub-leaf level 0.
         */
        cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
        core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
        core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
@@ -86,15 +98,6 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
        c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
        c->x86_max_cores = (core_level_siblings / smp_num_siblings);

        if (!printed) {
                pr_info("CPU: Physical Processor ID: %d\n",
                        c->phys_proc_id);
                if (c->x86_max_cores > 1)
                        pr_info("CPU: Processor Core ID: %d\n",
                                c->cpu_core_id);
                printed = 1;
        }
#endif
        return 0;
}
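
/*
 * Worked example: on a 2-way SMT part, CPUID leaf 0xb sub-leaf 0 (the
 * SMT level) reports LEVEL_MAX_SIBLINGS(ebx) == 2 logical processors
 * per core and BITS_SHIFT_NEXT_LEVEL(eax) == 1 APIC ID bit to strip,
 * which is all the early path needs to size smp_num_siblings.
 */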

View file

@@ -10,6 +10,7 @@
#include <asm/fpu/signal.h>
#include <asm/fpu/types.h>
#include <asm/traps.h>
#include <asm/irq_regs.h>
#include <linux/hardirq.h>
#include <linux/pkeys.h>

View file

@@ -1,6 +1,7 @@
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/errno.h>

View file

@@ -5,6 +5,7 @@
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/timex.h>
#include <linux/random.h>
#include <linux/init.h>

View file

@@ -8,6 +8,7 @@
#include <asm/traps.h>
#include <asm/proto.h>
#include <asm/desc.h>
#include <asm/hw_irq.h>
struct idt_data {
        unsigned int vector;

View file

@@ -10,6 +10,7 @@
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/irq.h>
#include <asm/apic.h>
#include <asm/io_apic.h>

View file

@@ -11,6 +11,7 @@
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>

View file

@@ -11,6 +11,7 @@
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/ftrace.h>

View file

@@ -5,6 +5,7 @@
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/timex.h>
#include <linux/random.h>
#include <linux/kprobes.h>

View file

@@ -27,6 +27,7 @@
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/hypervisor.h>
#include <asm/mem_encrypt.h>

View file

@@ -823,6 +823,12 @@ void __init setup_arch(char **cmdline_p)
        memblock_reserve(__pa_symbol(_text),
                         (unsigned long)__bss_stop - (unsigned long)_text);

        /*
         * Make sure page 0 is always reserved because on systems with
         * L1TF its contents can be leaked to user processes.
         */
        memblock_reserve(0, PAGE_SIZE);

        early_reserve_initrd();

        /*

View file

@@ -261,6 +261,7 @@ __visible void __irq_entry smp_reschedule_interrupt(struct pt_regs *regs)
{
        ack_APIC_irq();
        inc_irq_stat(irq_resched_count);
        kvm_set_cpu_l1tf_flush_l1d();

        if (trace_resched_ipi_enabled()) {
                /*

View file

@@ -80,6 +80,7 @@
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#include <asm/spec-ctrl.h>
#include <asm/hw_irq.h>
/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
@@ -270,6 +271,23 @@ static void notrace start_secondary(void *unused)
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/**
 * topology_is_primary_thread - Check whether CPU is the primary SMT thread
 * @cpu: CPU to check
 */
bool topology_is_primary_thread(unsigned int cpu)
{
        return apic_id_is_primary_thread(per_cpu(x86_cpu_to_apicid, cpu));
}

/**
 * topology_smt_supported - Check whether SMT is supported by the CPUs
 */
bool topology_smt_supported(void)
{
        return smp_num_siblings > 1;
}
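
/*
 * These helpers back the generic SMT control interface added in this
 * series, e.g. "echo off > /sys/devices/system/cpu/smt/control"
 * offlines all non-primary sibling threads, and "smt/active" reports
 * whether SMT is in effect.
 */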

/**
 * topology_phys_to_logical_pkg - Map a physical package id to a logical
 *

View file

@@ -12,6 +12,7 @@
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/i8253.h>
#include <linux/time.h>
#include <linux/export.h>