Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 core updates from Ingo Molnar:
 "Note that in this cycle most of the x86 topics interacted at a level
  that caused them to be merged into tip:x86/asm - but this should be a
  temporary phenomenon, hopefully we'll be back to the usual patterns in
  the next merge window.

  The main changes in this cycle were:

  Hardware enablement:

   - Add support for the Intel UMIP (User Mode Instruction Prevention)
     CPU feature. This is a security feature that disables certain
     instructions such as SGDT, SLDT, SIDT, SMSW and STR. (Ricardo Neri)

     [ Note that this is disabled by default for now; there are some
       smaller enhancements in the pipeline that I'll follow up with in
       the next 1-2 days, which allow this to be enabled by default. ]

   - Add support for the AMD SEV (Secure Encrypted Virtualization) CPU
     feature, on top of SME (Secure Memory Encryption) support that was
     added in v4.14. (Tom Lendacky, Brijesh Singh)

   - Enable new SSE/AVX/AVX512 CPU features: AVX512_VBMI2, GFNI, VAES,
     VPCLMULQDQ, AVX512_VNNI, AVX512_BITALG. (Gayatri Kammela)

  Other changes:

   - A big series of entry code simplifications and enhancements
     (Andy Lutomirski)

   - Make the ORC unwinder default on x86 and various objtool
     enhancements. (Josh Poimboeuf)

   - 5-level paging enhancements (Kirill A. Shutemov)

   - Micro-optimize the entry code a bit (Borislav Petkov)

   - Improve the handling of interdependent CPU features in the early
     FPU init code (Andi Kleen)

   - Build system enhancements (Changbin Du, Masahiro Yamada)

   - ... plus misc enhancements, fixes and cleanups"

* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (118 commits)
  x86/build: Make the boot image generation less verbose
  selftests/x86: Add tests for the STR and SLDT instructions
  selftests/x86: Add tests for User-Mode Instruction Prevention
  x86/traps: Fix up general protection faults caused by UMIP
  x86/umip: Enable User-Mode Instruction Prevention at runtime
  x86/umip: Force a page fault when unable to copy emulated result to user
  x86/umip: Add emulation code for UMIP instructions
  x86/cpufeature: Add User-Mode Instruction Prevention definitions
  x86/insn-eval: Add support to resolve 16-bit address encodings
  x86/insn-eval: Handle 32-bit address encodings in virtual-8086 mode
  x86/insn-eval: Add wrapper function for 32 and 64-bit addresses
  x86/insn-eval: Add support to resolve 32-bit address encodings
  x86/insn-eval: Compute linear address in several utility functions
  resource: Fix resource_size.cocci warnings
  X86/KVM: Clear encryption attribute when SEV is active
  X86/KVM: Decrypt shared per-cpu variables when SEV is active
  percpu: Introduce DEFINE_PER_CPU_DECRYPTED
  x86: Add support for changing memory encryption attribute in early boot
  x86/io: Unroll string I/O when SEV is active
  x86/boot: Add early boot support when running with SEV active
  ...
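For context on the UMIP item above: the instructions it restricts (SGDT, SLDT, SIDT, SMSW, STR) are otherwise executable from user space. Below is a minimal, illustrative user-space probe, not part of this merge; with UMIP enabled such an instruction raises #GP at CPL 3, which the x86/umip commits listed above can emulate for the affected instructions.

/* Illustrative sketch only (not from this merge): reading the GDT register
 * from user space. With UMIP enabled this SGDT raises #GP, and the kernel's
 * new emulation code may supply a dummy result instead. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        struct { uint16_t limit; uint64_t base; } __attribute__((packed)) gdtr;

        asm volatile ("sgdt %0" : "=m" (gdtr));
        printf("GDTR: base=%#llx limit=%#x\n",
               (unsigned long long)gdtr.base, gdtr.limit);
        return 0;
}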
@@ -31,6 +31,8 @@
  * mutex protecting text section modification (dynamic code patching).
  * some users need to sleep (allocating memory...) while they hold this lock.
  *
+ * Note: Also protects SMP-alternatives modification on x86.
+ *
  * NOT exported to modules - patching kernel text is a really delicate matter.
  */
 DEFINE_MUTEX(text_mutex);
@@ -406,9 +406,10 @@ static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end,
 	return 1;
 }
 
-static int locate_mem_hole_callback(u64 start, u64 end, void *arg)
+static int locate_mem_hole_callback(struct resource *res, void *arg)
 {
 	struct kexec_buf *kbuf = (struct kexec_buf *)arg;
+	u64 start = res->start, end = res->end;
 	unsigned long sz = end - start + 1;
 
 	/* Returning 0 will take to next memory range */
@@ -437,7 +438,7 @@ static int locate_mem_hole_callback(u64 start, u64 end, void *arg)
  * func returning non-zero, then zero will be returned.
  */
 int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf,
-			       int (*func)(u64, u64, void *))
+			       int (*func)(struct resource *, void *))
 {
 	if (kbuf->image->type == KEXEC_TYPE_CRASH)
 		return walk_iomem_res_desc(crashk_res.desc,
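The two kexec hunks above show the interface change running through this merge: resource-walk callbacks now receive the matching struct resource instead of a raw (start, end) pair, and locate_mem_hole_callback() derives start/end from it. A minimal sketch of a callback written against the new signature follows; the function name and usage are illustrative, not taken from the diff.

#include <linux/ioport.h>
#include <linux/types.h>

/* Sketch: a callback for walk_system_ram_res() in the new style. It sums the
 * size of every matching range into the u64 pointed to by arg. */
static int count_ram_bytes(struct resource *res, void *arg)
{
        u64 *total = arg;

        *total += resource_size(res);   /* equals res->end - res->start + 1 */
        return 0;                       /* 0 continues the walk to the next range */
}

Returning a non-zero value stops the walk, matching the "func returning non-zero" behaviour documented in the arch_kexec_walk_mem() hunk above.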
@@ -397,9 +397,32 @@ static int find_next_iomem_res(struct resource *res, unsigned long desc,
 		res->start = p->start;
 	if (res->end > p->end)
 		res->end = p->end;
+	res->flags = p->flags;
+	res->desc = p->desc;
 	return 0;
 }
 
+static int __walk_iomem_res_desc(struct resource *res, unsigned long desc,
+				 bool first_level_children_only,
+				 void *arg,
+				 int (*func)(struct resource *, void *))
+{
+	u64 orig_end = res->end;
+	int ret = -1;
+
+	while ((res->start < res->end) &&
+	       !find_next_iomem_res(res, desc, first_level_children_only)) {
+		ret = (*func)(res, arg);
+		if (ret)
+			break;
+
+		res->start = res->end + 1;
+		res->end = orig_end;
+	}
+
+	return ret;
+}
+
 /*
  * Walks through iomem resources and calls func() with matching resource
  * ranges. This walks through whole tree and not just first level children.
@@ -415,29 +438,15 @@ static int find_next_iomem_res(struct resource *res, unsigned long desc,
  * <linux/ioport.h> and set it in 'desc' of a target resource entry.
  */
 int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
-		u64 end, void *arg, int (*func)(u64, u64, void *))
+		u64 end, void *arg, int (*func)(struct resource *, void *))
 {
 	struct resource res;
-	u64 orig_end;
-	int ret = -1;
 
 	res.start = start;
 	res.end = end;
 	res.flags = flags;
-	orig_end = res.end;
 
-	while ((res.start < res.end) &&
-		(!find_next_iomem_res(&res, desc, false))) {
-
-		ret = (*func)(res.start, res.end, arg);
-		if (ret)
-			break;
-
-		res.start = res.end + 1;
-		res.end = orig_end;
-	}
-
-	return ret;
+	return __walk_iomem_res_desc(&res, desc, false, arg, func);
 }
 
 /*
@@ -448,25 +457,33 @@ int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
  * ranges.
  */
 int walk_system_ram_res(u64 start, u64 end, void *arg,
-			int (*func)(u64, u64, void *))
+			int (*func)(struct resource *, void *))
 {
 	struct resource res;
-	u64 orig_end;
-	int ret = -1;
 
 	res.start = start;
 	res.end = end;
 	res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
-	orig_end = res.end;
-	while ((res.start < res.end) &&
-		(!find_next_iomem_res(&res, IORES_DESC_NONE, true))) {
-		ret = (*func)(res.start, res.end, arg);
-		if (ret)
-			break;
-		res.start = res.end + 1;
-		res.end = orig_end;
-	}
-	return ret;
+
+	return __walk_iomem_res_desc(&res, IORES_DESC_NONE, true,
+				     arg, func);
 }
 
+/*
+ * This function calls the @func callback against all memory ranges, which
+ * are ranges marked as IORESOURCE_MEM and IORESOUCE_BUSY.
+ */
+int walk_mem_res(u64 start, u64 end, void *arg,
+		 int (*func)(struct resource *, void *))
+{
+	struct resource res;
+
+	res.start = start;
+	res.end = end;
+	res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+
+	return __walk_iomem_res_desc(&res, IORES_DESC_NONE, true,
+				     arg, func);
+}
+
 #if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
@@ -508,6 +525,7 @@ static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
 {
 	return 1;
 }
+
 /*
  * This generic page_is_ram() returns true if specified address is
  * registered as System RAM in iomem_resource list.
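The resource.c hunks above consolidate the duplicated walking loops into __walk_iomem_res_desc() and add walk_mem_res() for busy IORESOURCE_MEM ranges. A hedged usage sketch follows; the callback name and call site are illustrative and not part of the diff.

#include <linux/ioport.h>
#include <linux/printk.h>

/* Sketch: log every busy IORESOURCE_MEM range via the newly added walk_mem_res(). */
static int dump_mem_range(struct resource *res, void *arg)
{
        pr_info("mem resource: %pR\n", res);    /* %pR formats a struct resource */
        return 0;
}

/* Hypothetical call site, e.g. from an early x86 init path:
 *      walk_mem_res(0, ULONG_MAX, NULL, dump_mem_range);
 */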