Merge branches 'x86/urgent' and 'core/urgent' into x86/boot, to pick up fixes and avoid conflicts
Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -215,6 +215,10 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
 	apic_id = processor->local_apic_id;
 	enabled = processor->lapic_flags & ACPI_MADT_ENABLED;
 
+	/* Ignore invalid ID */
+	if (apic_id == 0xffffffff)
+		return 0;
+
 	/*
 	 * We need to register disabled CPU as well to permit
 	 * counting disabled CPUs. This allows us to size
@@ -116,6 +116,7 @@ static void init_x2apic_ldr(void)
 			goto update;
 	}
 	cmsk = cluster_hotplug_mask;
+	cmsk->clusterid = cluster;
 	cluster_hotplug_mask = NULL;
 update:
 	this_cpu_write(cluster_masks, cmsk);
@@ -868,6 +868,11 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
 		c->x86_power = edx;
 	}
 
+	if (c->extended_cpuid_level >= 0x80000008) {
+		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
+		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
+	}
+
 	if (c->extended_cpuid_level >= 0x8000000a)
 		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
 
@@ -891,7 +896,6 @@ static void get_cpu_address_sizes(struct cpuinfo_x86 *c)
 
 		c->x86_virt_bits = (eax >> 8) & 0xff;
 		c->x86_phys_bits = eax & 0xff;
-		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
 	}
 #ifdef CONFIG_X86_32
 	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
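For reference, the address widths that get_cpu_address_sizes() reads here come from CPUID leaf 0x80000008: EAX[7:0] is the physical address width and EAX[15:8] the virtual address width. A stand-alone user-space sketch (not part of this patch, using GCC's <cpuid.h> helper) that prints the same fields:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 0x80000008: EAX[7:0] = physical bits, EAX[15:8] = virtual bits */
	if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0x80000008 is not supported on this CPU");
		return 1;
	}
	printf("physical address bits: %u\n", eax & 0xff);
	printf("virtual address bits:  %u\n", (eax >> 8) & 0xff);
	return 0;
}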
@@ -811,6 +811,9 @@ static const struct _tlb_table intel_tlb_table[] = {
 	{ 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" },
 	{ 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" },
 	{ 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" },
+	{ 0x6b, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 8-way associative" },
+	{ 0x6c, TLB_DATA_2M_4M, 128, " TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
+	{ 0x6d, TLB_DATA_1G, 16, " TLB_DATA 1 GByte pages, fully associative" },
 	{ 0x76, TLB_INST_2M_4M, 8, " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
 	{ 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" },
 	{ 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
@@ -564,14 +564,12 @@ static int __reload_late(void *info)
 	apply_microcode_local(&err);
 	spin_unlock(&update_lock);
 
+	/* siblings return UCODE_OK because their engine got updated already */
 	if (err > UCODE_NFOUND) {
 		pr_warn("Error reloading microcode on CPU %d\n", cpu);
-		return -1;
-	/* siblings return UCODE_OK because their engine got updated already */
+		ret = -1;
 	} else if (err == UCODE_UPDATED || err == UCODE_OK) {
 		ret = 1;
-	} else {
-		return ret;
 	}
 
 	/*
@@ -485,7 +485,6 @@ static void show_saved_mc(void)
  */
 static void save_mc_for_early(u8 *mc, unsigned int size)
 {
-#ifdef CONFIG_HOTPLUG_CPU
 	/* Synchronization during CPU hotplug. */
 	static DEFINE_MUTEX(x86_cpu_microcode_mutex);
 
@@ -495,7 +494,6 @@ static void save_mc_for_early(u8 *mc, unsigned int size)
 	show_saved_mc();
 
 	mutex_unlock(&x86_cpu_microcode_mutex);
-#endif
 }
 
 static bool load_builtin_intel_microcode(struct cpio_data *cp)
@@ -104,6 +104,12 @@ static bool __head check_la57_support(unsigned long physaddr)
 }
 #endif
 
+/* Code in __startup_64() can be relocated during execution, but the compiler
+ * doesn't have to generate PC-relative relocations when accessing globals from
+ * that function. Clang actually does not generate them, which leads to
+ * boot-time crashes. To work around this problem, every global pointer must
+ * be adjusted using fixup_pointer().
+ */
 unsigned long __head __startup_64(unsigned long physaddr,
 				  struct boot_params *bp)
 {
@@ -113,6 +119,7 @@ unsigned long __head __startup_64(unsigned long physaddr,
 	p4dval_t *p4d;
 	pudval_t *pud;
 	pmdval_t *pmd, pmd_entry;
+	pteval_t *mask_ptr;
 	bool la57;
 	int i;
 	unsigned int *next_pgt_ptr;
@@ -196,7 +203,8 @@ unsigned long __head __startup_64(unsigned long physaddr,
 
 	pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
 	/* Filter out unsupported __PAGE_KERNEL_* bits: */
-	pmd_entry &= __supported_pte_mask;
+	mask_ptr = fixup_pointer(&__supported_pte_mask, physaddr);
+	pmd_entry &= *mask_ptr;
 	pmd_entry += sme_get_me_mask();
 	pmd_entry += physaddr;
 
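The new comment refers to fixup_pointer(); for readers following the hunk, that helper (defined earlier in head64.c) is essentially a one-liner that rebases a link-time address onto the physical address the kernel was actually loaded at. Roughly its shape:

/* Roughly the shape of the helper in arch/x86/kernel/head64.c:
 * rebase a link-time pointer onto the physical load address, because
 * __startup_64() runs before relocations have been applied.
 */
static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
{
	return ptr - (void *)_text + (void *)physaddr;
}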
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL2.0
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Jailhouse paravirt_ops implementation
  *
@@ -398,11 +398,10 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
 	 * little bit simple
 	 */
 	efi_map_sz = efi_get_runtime_map_size();
-	efi_map_sz = ALIGN(efi_map_sz, 16);
 	params_cmdline_sz = sizeof(struct boot_params) + cmdline_len +
 				MAX_ELFCOREHDR_STR_LEN;
 	params_cmdline_sz = ALIGN(params_cmdline_sz, 16);
-	kbuf.bufsz = params_cmdline_sz + efi_map_sz +
+	kbuf.bufsz = params_cmdline_sz + ALIGN(efi_map_sz, 16) +
 				sizeof(struct setup_data) +
 				sizeof(struct efi_setup_data);
 
@@ -410,7 +409,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
 	if (!params)
 		return ERR_PTR(-ENOMEM);
 	efi_map_offset = params_cmdline_sz;
-	efi_setup_data_offset = efi_map_offset + efi_map_sz;
+	efi_setup_data_offset = efi_map_offset + ALIGN(efi_map_sz, 16);
 
 	/* Copy setup header onto bootparams. Documentation/x86/boot.txt */
 	setup_header_size = 0x0202 + kernel[0x0201] - setup_hdr_offset;
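Both hunks move the 16-byte rounding of efi_map_sz to its points of use. For readers unfamiliar with the macro, ALIGN() rounds a value up to the next multiple of a power-of-two boundary; a stand-alone illustration with a made-up size (not part of the patch):

#include <stdio.h>
#include <stddef.h>

/* Round x up to the next multiple of a (a must be a power of two),
 * mirroring what the kernel's ALIGN() macro computes. */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	size_t efi_map_sz = 52;	/* hypothetical runtime-map size */

	printf("raw size:     %zu\n", efi_map_sz);
	printf("aligned size: %zu\n", ALIGN_UP(efi_map_sz, 16));	/* prints 64 */
	return 0;
}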
@@ -370,6 +370,10 @@ int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
 	if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
 		return 0;
 
+	/* We should not singlestep on the exception masking instructions */
+	if (insn_masking_exception(insn))
+		return 0;
+
 #ifdef CONFIG_X86_64
 	/* Only x86_64 has RIP relative instructions */
 	if (insn_rip_relative(insn)) {
@@ -57,12 +57,17 @@ static void load_segments(void)
 static void machine_kexec_free_page_tables(struct kimage *image)
 {
 	free_page((unsigned long)image->arch.pgd);
+	image->arch.pgd = NULL;
 #ifdef CONFIG_X86_PAE
 	free_page((unsigned long)image->arch.pmd0);
+	image->arch.pmd0 = NULL;
 	free_page((unsigned long)image->arch.pmd1);
+	image->arch.pmd1 = NULL;
 #endif
 	free_page((unsigned long)image->arch.pte0);
+	image->arch.pte0 = NULL;
 	free_page((unsigned long)image->arch.pte1);
+	image->arch.pte1 = NULL;
 }
 
 static int machine_kexec_alloc_page_tables(struct kimage *image)
@@ -79,7 +84,6 @@ static int machine_kexec_alloc_page_tables(struct kimage *image)
 	    !image->arch.pmd0 || !image->arch.pmd1 ||
 #endif
 	    !image->arch.pte0 || !image->arch.pte1) {
-		machine_kexec_free_page_tables(image);
 		return -ENOMEM;
 	}
 	return 0;
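The idea in this hunk (and in the 64-bit variant that follows) is to reset each pointer to NULL right after freeing it and to stop freeing in the allocation error path, so that the single cleanup function can safely run more than once. A minimal user-space sketch of the same idiom, with hypothetical names (not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct image_arch {
	void *pgd;
	void *pte0;
};

/* Safe to call any number of times: freed members are reset to NULL,
 * and free(NULL) is a defined no-op. */
void free_page_tables(struct image_arch *arch)
{
	free(arch->pgd);
	arch->pgd = NULL;
	free(arch->pte0);
	arch->pte0 = NULL;
}

int alloc_page_tables(struct image_arch *arch)
{
	arch->pgd  = calloc(1, 4096);
	arch->pte0 = calloc(1, 4096);
	if (!arch->pgd || !arch->pte0)
		return -1;	/* do not free here; the caller's cleanup path does it */
	return 0;
}

int main(void)
{
	struct image_arch arch = { 0 };

	if (alloc_page_tables(&arch) < 0)
		puts("allocation failed");
	free_page_tables(&arch);
	free_page_tables(&arch);	/* harmless second call thanks to the NULLing */
	return 0;
}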
@@ -39,9 +39,13 @@ const struct kexec_file_ops * const kexec_file_loaders[] = {
 static void free_transition_pgtable(struct kimage *image)
 {
 	free_page((unsigned long)image->arch.p4d);
+	image->arch.p4d = NULL;
 	free_page((unsigned long)image->arch.pud);
+	image->arch.pud = NULL;
 	free_page((unsigned long)image->arch.pmd);
+	image->arch.pmd = NULL;
 	free_page((unsigned long)image->arch.pte);
+	image->arch.pte = NULL;
 }
 
 static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
@@ -91,7 +95,6 @@ static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
 	set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC_NOENC));
 	return 0;
 err:
-	free_transition_pgtable(image);
 	return result;
 }
 
@@ -1,90 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Fallback functions when the main IOMMU code is not compiled in. This
-   code is roughly equivalent to i386. */
-#include <linux/dma-direct.h>
-#include <linux/scatterlist.h>
-#include <linux/string.h>
-#include <linux/gfp.h>
-#include <linux/pci.h>
-#include <linux/mm.h>
-
-#include <asm/processor.h>
-#include <asm/iommu.h>
-#include <asm/dma.h>
-
-#define NOMMU_MAPPING_ERROR		0
-
-static int
-check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
-{
-	if (hwdev && !dma_capable(hwdev, bus, size)) {
-		if (*hwdev->dma_mask >= DMA_BIT_MASK(32))
-			printk(KERN_ERR
-				"nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
-				name, (long long)bus, size,
-				(long long)*hwdev->dma_mask);
-		return 0;
-	}
-	return 1;
-}
-
-static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
-				 unsigned long offset, size_t size,
-				 enum dma_data_direction dir,
-				 unsigned long attrs)
-{
-	dma_addr_t bus = phys_to_dma(dev, page_to_phys(page)) + offset;
-	WARN_ON(size == 0);
-	if (!check_addr("map_single", dev, bus, size))
-		return NOMMU_MAPPING_ERROR;
-	return bus;
-}
-
-/* Map a set of buffers described by scatterlist in streaming
- * mode for DMA. This is the scatter-gather version of the
- * above pci_map_single interface. Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length. They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- *       DMA address/length pairs than there are SG table elements.
- *       (for example via virtual mapping capabilities)
- *       The routine returns the number of addr/length pairs actually
- *       used, at most nents.
- *
- * Device ownership issues as mentioned above for pci_map_single are
- * the same here.
- */
-static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
-			int nents, enum dma_data_direction dir,
-			unsigned long attrs)
-{
-	struct scatterlist *s;
-	int i;
-
-	WARN_ON(nents == 0 || sg[0].length == 0);
-
-	for_each_sg(sg, s, nents, i) {
-		BUG_ON(!sg_page(s));
-		s->dma_address = sg_phys(s);
-		if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
-			return 0;
-		s->dma_length = s->length;
-	}
-	return nents;
-}
-
-static int nommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return dma_addr == NOMMU_MAPPING_ERROR;
-}
-
-const struct dma_map_ops nommu_dma_ops = {
-	.alloc = dma_generic_alloc_coherent,
-	.free = dma_generic_free_coherent,
-	.map_sg = nommu_map_sg,
-	.map_page = nommu_map_page,
-	.is_phys = 1,
-	.mapping_error = nommu_mapping_error,
-	.dma_supported = x86_dma_supported,
-};
@@ -50,6 +50,7 @@
 #include <linux/init_ohci1394_dma.h>
 #include <linux/kvm_para.h>
 #include <linux/dma-contiguous.h>
+#include <xen/xen.h>
 
 #include <linux/errno.h>
 #include <linux/kernel.h>
@@ -534,6 +535,11 @@ static void __init reserve_crashkernel(void)
 		high = true;
 	}
 
+	if (xen_pv_domain()) {
+		pr_info("Ignoring crashkernel for a Xen PV domain\n");
+		return;
+	}
+
 	/* 0 means: find the address automatically */
 	if (crash_base <= 0) {
 		/*
@@ -77,6 +77,8 @@
 #include <asm/i8259.h>
 #include <asm/misc.h>
 #include <asm/qspinlock.h>
+#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
 
 /* representing HT siblings of each logical CPU */
 DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
@@ -383,15 +385,47 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 	return false;
 }
 
+/*
+ * Define snc_cpu[] for SNC (Sub-NUMA Cluster) CPUs.
+ *
+ * These are Intel CPUs that enumerate an LLC that is shared by
+ * multiple NUMA nodes. The LLC on these systems is shared for
+ * off-package data access but private to the NUMA node (half
+ * of the package) for on-package access.
+ *
+ * CPUID (the source of the information about the LLC) can only
+ * enumerate the cache as being shared *or* unshared, but not
+ * this particular configuration. The CPU in this case enumerates
+ * the cache to be shared across the entire package (spanning both
+ * NUMA nodes).
+ */
+
+static const struct x86_cpu_id snc_cpu[] = {
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X },
+	{}
+};
+
 static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 {
 	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 
-	if (per_cpu(cpu_llc_id, cpu1) != BAD_APICID &&
-	    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2))
-		return topology_sane(c, o, "llc");
+	/* Do not match if we do not have a valid APICID for cpu: */
+	if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID)
+		return false;
 
-	return false;
+	/* Do not match if LLC id does not match: */
+	if (per_cpu(cpu_llc_id, cpu1) != per_cpu(cpu_llc_id, cpu2))
+		return false;
+
+	/*
+	 * Allow the SNC topology without warning. Return of false
+	 * means 'c' does not share the LLC of 'o'. This will be
+	 * reflected to userspace.
+	 */
+	if (!topology_same_node(c, o) && x86_match_cpu(snc_cpu))
+		return false;
+
+	return topology_sane(c, o, "llc");
 }
 
 /*
@@ -449,7 +483,8 @@ static struct sched_domain_topology_level x86_topology[] = {
 
 /*
  * Set if a package/die has multiple NUMA nodes inside.
- * AMD Magny-Cours and Intel Cluster-on-Die have this.
+ * AMD Magny-Cours, Intel Cluster-on-Die, and Intel
+ * Sub-NUMA Clustering have this.
  */
 static bool x86_has_numa_in_package;
 
@@ -1529,6 +1564,8 @@ static inline void mwait_play_dead(void)
 	void *mwait_ptr;
 	int i;
 
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		return;
 	if (!this_cpu_has(X86_FEATURE_MWAIT))
 		return;
 	if (!this_cpu_has(X86_FEATURE_CLFLUSH))
@@ -317,7 +317,7 @@ static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
 	hpet2 -= hpet1;
 	tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
 	do_div(tmp, 1000000);
-	do_div(deltatsc, tmp);
+	deltatsc = div64_u64(deltatsc, tmp);
 
 	return (unsigned long) deltatsc;
 }
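The switch from do_div() to div64_u64() matters because do_div() takes a 32-bit divisor: a divisor wider than 32 bits is silently truncated before the division, while div64_u64() divides by the full 64-bit value. A stand-alone illustration of the difference (not kernel code, with made-up numbers):

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint64_t deltatsc = 50000000000ULL;	/* hypothetical numerator */
	uint64_t tmp      = 6000000000ULL;	/* divisor wider than 32 bits */

	/* What a 32-bit-divisor division effectively computes: */
	uint64_t truncated = deltatsc / (uint32_t)tmp;
	/* What a full 64-bit division computes: */
	uint64_t correct   = deltatsc / tmp;

	printf("with truncated divisor: %" PRIu64 "\n", truncated);	/* 29 */
	printf("with 64-bit divisor:    %" PRIu64 "\n", correct);	/* 8 */
	return 0;
}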
@@ -1067,6 +1067,7 @@ static struct clocksource clocksource_tsc_early = {
 	.resume = tsc_resume,
 	.mark_unstable = tsc_cs_mark_unstable,
 	.tick_stable = tsc_cs_tick_stable,
+	.list = LIST_HEAD_INIT(clocksource_tsc_early.list),
 };
 
 /*
@@ -1086,6 +1087,7 @@ static struct clocksource clocksource_tsc = {
 	.resume = tsc_resume,
 	.mark_unstable = tsc_cs_mark_unstable,
 	.tick_stable = tsc_cs_tick_stable,
+	.list = LIST_HEAD_INIT(clocksource_tsc.list),
 };
 
 void mark_tsc_unstable(char *reason)
@@ -1098,13 +1100,9 @@ void mark_tsc_unstable(char *reason)
 	clear_sched_clock_stable();
 	disable_sched_clock_irqtime();
 	pr_info("Marking TSC unstable due to %s\n", reason);
-	/* Change only the rating, when not registered */
-	if (clocksource_tsc.mult) {
-		clocksource_mark_unstable(&clocksource_tsc);
-	} else {
-		clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
-		clocksource_tsc.rating = 0;
-	}
+
+	clocksource_mark_unstable(&clocksource_tsc_early);
+	clocksource_mark_unstable(&clocksource_tsc);
 }
 
 EXPORT_SYMBOL_GPL(mark_tsc_unstable);
@@ -1244,7 +1242,7 @@ static void tsc_refine_calibration_work(struct work_struct *work)
 
 	/* Don't bother refining TSC on unstable systems */
 	if (tsc_unstable)
-		return;
+		goto unreg;
 
 	/*
 	 * Since the work is started early in boot, we may be
@@ -1297,11 +1295,12 @@ static void tsc_refine_calibration_work(struct work_struct *work)
 
 out:
 	if (tsc_unstable)
-		return;
+		goto unreg;
 
 	if (boot_cpu_has(X86_FEATURE_ART))
 		art_related_clocksource = &clocksource_tsc;
 	clocksource_register_khz(&clocksource_tsc, tsc_khz);
+unreg:
 	clocksource_unregister(&clocksource_tsc_early);
 }
 
@@ -1311,8 +1310,8 @@ static int __init init_tsc_clocksource(void)
 	if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_disabled > 0 || !tsc_khz)
 		return 0;
 
-	if (check_tsc_unstable())
-		return 0;
+	if (tsc_unstable)
+		goto unreg;
 
 	if (tsc_clocksource_reliable)
 		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
@@ -1328,6 +1327,7 @@ static int __init init_tsc_clocksource(void)
 	if (boot_cpu_has(X86_FEATURE_ART))
 		art_related_clocksource = &clocksource_tsc;
 	clocksource_register_khz(&clocksource_tsc, tsc_khz);
+unreg:
 	clocksource_unregister(&clocksource_tsc_early);
 	return 0;
 }
@@ -299,6 +299,10 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool
 	if (is_prefix_bad(insn))
 		return -ENOTSUPP;
 
+	/* We should not singlestep on the exception masking instructions */
+	if (insn_masking_exception(insn))
+		return -ENOTSUPP;
+
 	if (x86_64)
 		good_insns = good_insns_64;
 	else