Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 mm updates from Ingo Molnar:
 "Lots of changes in this cycle:

   - Lots of CPA (change page attribute) optimizations and related
     cleanups (Thomas Gleixner, Peter Zijlstra)

   - Make lazy TLB mode even lazier (Rik van Riel)

   - Fault handler cleanups and improvements (Dave Hansen)

   - kdump, vmcore: Enable kdumping encrypted memory with AMD SME
     enabled (Lianbo Jiang)

   - Clean up VM layout documentation (Baoquan He, Ingo Molnar)

   - ... plus misc other fixes and enhancements"

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (51 commits)
  x86/stackprotector: Remove the call to boot_init_stack_canary() from cpu_startup_entry()
  x86/mm: Kill stray kernel fault handling comment
  x86/mm: Do not warn about PCI BIOS W+X mappings
  resource: Clean it up a bit
  resource: Fix find_next_iomem_res() iteration issue
  resource: Include resource end in walk_*() interfaces
  x86/kexec: Correct KEXEC_BACKUP_SRC_END off-by-one error
  x86/mm: Remove spurious fault pkey check
  x86/mm/vsyscall: Consider vsyscall page part of user address space
  x86/mm: Add vsyscall address helper
  x86/mm: Fix exception table comments
  x86/mm: Add clarifying comments for user addr space
  x86/mm: Break out user address space handling
  x86/mm: Break out kernel address space handling
  x86/mm: Clarify hardware vs. software "error_code"
  x86/mm/tlb: Make lazy TLB mode lazier
  x86/mm/tlb: Add freed_tables element to flush_tlb_info
  x86/mm/tlb: Add freed_tables argument to flush_tlb_mm_range
  smp,cpumask: introduce on_each_cpu_cond_mask
  smp: use __cpumask_set_cpu in on_each_cpu_cond
  ...
@@ -471,6 +471,10 @@ static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
 		}
 	}
 
+	/* Ensure that these pages are decrypted if SME is enabled. */
+	if (pages)
+		arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0);
+
 	return pages;
 }
 
@@ -867,6 +871,7 @@ static int kimage_load_crash_segment(struct kimage *image,
 			result = -ENOMEM;
 			goto out;
 		}
+		arch_kexec_post_alloc_pages(page_address(page), 1, 0);
 		ptr = kmap(page);
 		ptr += maddr & ~PAGE_MASK;
 		mchunk = min_t(size_t, mbytes,
@@ -884,6 +889,7 @@ static int kimage_load_crash_segment(struct kimage *image,
 		result = copy_from_user(ptr, buf, uchunk);
 		kexec_flush_icache_page(page);
 		kunmap(page);
+		arch_kexec_pre_free_pages(page_address(page), 1);
 		if (result) {
 			result = -EFAULT;
 			goto out;
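The two arch_kexec_*_pages() hooks called in the hunks above are what make kdump work with AMD SME: the kexec control and segment pages must be mapped unencrypted while the crash data is written into them, and re-encrypted before they are freed. As background only, an x86-style implementation of these hooks essentially toggles the encryption attribute of the pages' kernel mapping. The sketch below is illustrative, not part of this merge's diff, and assumes the existing set_memory_decrypted()/set_memory_encrypted() helpers:

/* Illustrative sketch (not from this merge): SME-aware kexec page hooks. */
#include <linux/kexec.h>
#include <asm/set_memory.h>

int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp)
{
	/*
	 * Map the freshly allocated kexec pages unencrypted so the data
	 * written here is readable by the crash kernel.
	 */
	return set_memory_decrypted((unsigned long)vaddr, pages);
}

void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages)
{
	/*
	 * Restore the encrypted mapping before the pages go back to the
	 * page allocator.
	 */
	set_memory_encrypted((unsigned long)vaddr, pages);
}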
@@ -318,33 +318,34 @@ int release_resource(struct resource *old)
 
 EXPORT_SYMBOL(release_resource);
 
-/*
- * Finds the lowest iomem resource existing within [res->start.res->end).
- * The caller must specify res->start, res->end, res->flags, and optionally
- * desc. If found, returns 0, res is overwritten, if not found, returns -1.
- * This function walks the whole tree and not just first level children until
- * and unless first_level_children_only is true.
+/**
+ * Finds the lowest iomem resource that covers part of [start..end].  The
+ * caller must specify start, end, flags, and desc (which may be
+ * IORES_DESC_NONE).
+ *
+ * If a resource is found, returns 0 and *res is overwritten with the part
+ * of the resource that's within [start..end]; if none is found, returns
+ * -1.
+ *
+ * This function walks the whole tree and not just first level children
+ * unless @first_lvl is true.
  */
-static int find_next_iomem_res(struct resource *res, unsigned long desc,
-			       bool first_level_children_only)
+static int find_next_iomem_res(resource_size_t start, resource_size_t end,
+			       unsigned long flags, unsigned long desc,
+			       bool first_lvl, struct resource *res)
 {
-	resource_size_t start, end;
 	struct resource *p;
-	bool sibling_only = false;
 
-	BUG_ON(!res);
-
-	start = res->start;
-	end = res->end;
-	BUG_ON(start >= end);
-
-	if (first_level_children_only)
-		sibling_only = true;
+	if (!res)
+		return -EINVAL;
+
+	if (start >= end)
+		return -EINVAL;
 
 	read_lock(&resource_lock);
 
-	for (p = iomem_resource.child; p; p = next_resource(p, sibling_only)) {
-		if ((p->flags & res->flags) != res->flags)
+	for (p = iomem_resource.child; p; p = next_resource(p, first_lvl)) {
+		if ((p->flags & flags) != flags)
 			continue;
 		if ((desc != IORES_DESC_NONE) && (desc != p->desc))
 			continue;
@@ -352,45 +353,43 @@ static int find_next_iomem_res(struct resource *res, unsigned long desc,
 			p = NULL;
 			break;
 		}
-		if ((p->end >= start) && (p->start < end))
+		if ((p->end >= start) && (p->start <= end))
 			break;
 	}
 
 	read_unlock(&resource_lock);
 	if (!p)
 		return -1;
+
 	/* copy data */
-	if (res->start < p->start)
-		res->start = p->start;
-	if (res->end > p->end)
-		res->end = p->end;
+	res->start = max(start, p->start);
+	res->end = min(end, p->end);
 	res->flags = p->flags;
 	res->desc = p->desc;
 	return 0;
 }
 
-static int __walk_iomem_res_desc(struct resource *res, unsigned long desc,
-				 bool first_level_children_only,
-				 void *arg,
+static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
+				 unsigned long flags, unsigned long desc,
+				 bool first_lvl, void *arg,
 				 int (*func)(struct resource *, void *))
 {
-	u64 orig_end = res->end;
+	struct resource res;
 	int ret = -1;
 
-	while ((res->start < res->end) &&
-	       !find_next_iomem_res(res, desc, first_level_children_only)) {
-		ret = (*func)(res, arg);
+	while (start < end &&
+	       !find_next_iomem_res(start, end, flags, desc, first_lvl, &res)) {
+		ret = (*func)(&res, arg);
 		if (ret)
 			break;
 
-		res->start = res->end + 1;
-		res->end = orig_end;
+		start = res.end + 1;
 	}
 
 	return ret;
 }
 
-/*
+/**
 * Walks through iomem resources and calls func() with matching resource
 * ranges. This walks through whole tree and not just first level children.
 * All the memory ranges which overlap start,end and also match flags and
@@ -407,13 +406,7 @@ static int __walk_iomem_res_desc(struct resource *res, unsigned long desc,
 int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
 		u64 end, void *arg, int (*func)(struct resource *, void *))
 {
-	struct resource res;
-
-	res.start = start;
-	res.end = end;
-	res.flags = flags;
-
-	return __walk_iomem_res_desc(&res, desc, false, arg, func);
+	return __walk_iomem_res_desc(start, end, flags, desc, false, arg, func);
 }
 EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
 
@@ -425,15 +418,11 @@ EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
 * ranges.
 */
 int walk_system_ram_res(u64 start, u64 end, void *arg,
-				int (*func)(struct resource *, void *))
+			int (*func)(struct resource *, void *))
 {
-	struct resource res;
+	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 
-	res.start = start;
-	res.end = end;
-	res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
-
-	return __walk_iomem_res_desc(&res, IORES_DESC_NONE, true,
+	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
				     arg, func);
 }
 
@@ -444,13 +433,9 @@ int walk_system_ram_res(u64 start, u64 end, void *arg,
 int walk_mem_res(u64 start, u64 end, void *arg,
		 int (*func)(struct resource *, void *))
 {
-	struct resource res;
+	unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 
-	res.start = start;
-	res.end = end;
-	res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-
-	return __walk_iomem_res_desc(&res, IORES_DESC_NONE, true,
+	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
				     arg, func);
 }
 
@@ -462,27 +447,27 @@ int walk_mem_res(u64 start, u64 end, void *arg,
 * It is to be used only for System RAM.
 */
 int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
-		void *arg, int (*func)(unsigned long, unsigned long, void *))
+			  void *arg, int (*func)(unsigned long, unsigned long, void *))
 {
+	resource_size_t start, end;
+	unsigned long flags;
 	struct resource res;
 	unsigned long pfn, end_pfn;
-	u64 orig_end;
 	int ret = -1;
 
-	res.start = (u64) start_pfn << PAGE_SHIFT;
-	res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
-	res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
-	orig_end = res.end;
-	while ((res.start < res.end) &&
-	       (find_next_iomem_res(&res, IORES_DESC_NONE, true) >= 0)) {
+	start = (u64) start_pfn << PAGE_SHIFT;
+	end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
+	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+	while (start < end &&
+	       !find_next_iomem_res(start, end, flags, IORES_DESC_NONE,
+				    true, &res)) {
 		pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
 		end_pfn = (res.end + 1) >> PAGE_SHIFT;
 		if (end_pfn > pfn)
 			ret = (*func)(pfn, end_pfn - pfn, arg);
 		if (ret)
 			break;
-		res.start = res.end + 1;
-		res.end = orig_end;
+		start = res.end + 1;
 	}
 	return ret;
 }
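Taken together, the hunks above switch the iomem walkers from passing a scratch struct resource around to passing an explicit, inclusive [start..end] range plus flags, and find_next_iomem_res() now also matches a resource whose start sits exactly at end (p->start <= end). The exported interface keeps its shape, so existing callers are unaffected. A small hypothetical caller of walk_system_ram_res() for illustration; the callback and helper names here are made up and not part of this merge:

#include <linux/ioport.h>
#include <linux/types.h>

/*
 * Hypothetical callback: add up how much System RAM the walker reports
 * in each overlapping resource chunk.
 */
static int count_ram_bytes(struct resource *res, void *arg)
{
	u64 *total = arg;

	*total += resource_size(res);	/* res->end is inclusive */
	return 0;			/* returning non-zero stops the walk */
}

/* Hypothetical helper: total busy System RAM overlapping [start..end]. */
static u64 total_ram_in_range(u64 start, u64 end)
{
	u64 total = 0;

	walk_system_ram_res(start, end, &total, count_ram_bytes);
	return total;
}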
@@ -658,8 +643,8 @@ static int find_resource(struct resource *root, struct resource *new,
 * @constraint: the size and alignment constraints to be met.
 */
 static int reallocate_resource(struct resource *root, struct resource *old,
-			resource_size_t newsize,
-			struct resource_constraint *constraint)
+			       resource_size_t newsize,
+			       struct resource_constraint *constraint)
 {
	int err=0;
	struct resource new = *old;
@@ -972,7 +957,7 @@ skip:
 * Existing children of the resource are assumed to be immutable.
 */
 int adjust_resource(struct resource *res, resource_size_t start,
-			resource_size_t size)
+		    resource_size_t size)
 {
	int result;
 
@@ -983,9 +968,9 @@ int adjust_resource(struct resource *res, resource_size_t start,
 }
 EXPORT_SYMBOL(adjust_resource);
 
-static void __init __reserve_region_with_split(struct resource *root,
-		resource_size_t start, resource_size_t end,
-		const char *name)
+static void __init
+__reserve_region_with_split(struct resource *root, resource_size_t start,
+			    resource_size_t end, const char *name)
 {
	struct resource *parent = root;
	struct resource *conflict;
@@ -1044,9 +1029,9 @@ static void __init __reserve_region_with_split(struct resource *root,
 
 }
 
-void __init reserve_region_with_split(struct resource *root,
-		resource_size_t start, resource_size_t end,
-		const char *name)
+void __init
+reserve_region_with_split(struct resource *root, resource_size_t start,
+			  resource_size_t end, const char *name)
 {
	int abort = 0;
 
@@ -1172,7 +1157,7 @@ EXPORT_SYMBOL(__request_region);
 * The described resource region must match a currently busy region.
 */
 void __release_region(struct resource *parent, resource_size_t start,
-			resource_size_t n)
+		      resource_size_t n)
 {
	struct resource **p;
	resource_size_t end;
@@ -1234,7 +1219,7 @@ EXPORT_SYMBOL(__release_region);
 * simplicity. Enhance this logic when necessary.
 */
 int release_mem_region_adjustable(struct resource *parent,
-			resource_size_t start, resource_size_t size)
+				  resource_size_t start, resource_size_t size)
 {
	struct resource **p;
	struct resource *res;
@@ -1410,9 +1395,9 @@ static int devm_region_match(struct device *dev, void *res, void *match_data)
		this->start == match->start && this->n == match->n;
 }
 
-struct resource * __devm_request_region(struct device *dev,
-		struct resource *parent, resource_size_t start,
-		resource_size_t n, const char *name)
+struct resource *
+__devm_request_region(struct device *dev, struct resource *parent,
+		      resource_size_t start, resource_size_t n, const char *name)
 {
	struct region_devres *dr = NULL;
	struct resource *res;
@@ -347,21 +347,6 @@ EXPORT_SYMBOL_GPL(play_idle);
 
 void cpu_startup_entry(enum cpuhp_state state)
 {
-	/*
-	 * This #ifdef needs to die, but it's too late in the cycle to
-	 * make this generic (ARM and SH have never invoked the canary
-	 * init for the non boot CPUs!). Will be fixed in 3.11
-	 */
-#ifdef CONFIG_X86
-	/*
-	 * If we're the non-boot CPU, nothing set the stack canary up
-	 * for us. The boot CPU already has it initialized but no harm
-	 * in doing it again. This is a good place for updating it, as
-	 * we wont ever return from this function (so the invalid
-	 * canaries already on the stack wont ever trigger).
-	 */
-	boot_init_stack_canary();
-#endif
	arch_cpu_idle_prepare();
	cpuhp_online_idle(state);
	while (1)
@@ -56,7 +56,6 @@
 #include <linux/profile.h>
 #include <linux/rcupdate_wait.h>
 #include <linux/security.h>
-#include <linux/stackprotector.h>
 #include <linux/stop_machine.h>
 #include <linux/suspend.h>
 #include <linux/swait.h>
kernel/smp.c (19 changed lines)
@@ -669,9 +669,9 @@ EXPORT_SYMBOL(on_each_cpu_mask);
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
			smp_call_func_t func, void *info, bool wait,
-			gfp_t gfp_flags)
+			gfp_t gfp_flags, const struct cpumask *mask)
 {
	cpumask_var_t cpus;
	int cpu, ret;
@@ -680,9 +680,9 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
 
	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
		preempt_disable();
-		for_each_online_cpu(cpu)
+		for_each_cpu(cpu, mask)
			if (cond_func(cpu, info))
-				cpumask_set_cpu(cpu, cpus);
+				__cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, func, info, wait);
		preempt_enable();
		free_cpumask_var(cpus);
@@ -692,7 +692,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
		 * just have to IPI them one by one.
		 */
		preempt_disable();
-		for_each_online_cpu(cpu)
+		for_each_cpu(cpu, mask)
			if (cond_func(cpu, info)) {
				ret = smp_call_function_single(cpu, func,
							       info, wait);
@@ -701,6 +701,15 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
		preempt_enable();
	}
 }
+EXPORT_SYMBOL(on_each_cpu_cond_mask);
+
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+			smp_call_func_t func, void *info, bool wait,
+			gfp_t gfp_flags)
+{
+	on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags,
+			      cpu_online_mask);
+}
 EXPORT_SYMBOL(on_each_cpu_cond);
 
 static void do_nothing(void *unused)
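on_each_cpu_cond_mask() is the existing conditional cross-call helper with an explicit target mask; on_each_cpu_cond() is now a thin wrapper that passes cpu_online_mask (the UP version further down passes NULL, since there is only one CPU). A hypothetical use, loosely modelled on how this series limits TLB-flush IPIs to CPUs that actually need them; the struct, predicate, and helper names are made up for illustration:

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

/* Hypothetical state describing which CPUs have something to clean up. */
struct cleanup_info {
	const struct cpumask *dirty_cpus;
};

/*
 * Predicate evaluated (with preemption disabled) for each CPU in the mask;
 * only CPUs for which it returns true receive the IPI.
 */
static bool cpu_needs_cleanup(int cpu, void *info)
{
	struct cleanup_info *ci = info;

	return cpumask_test_cpu(cpu, ci->dirty_cpus);
}

/* Payload executed on each selected CPU, in IPI context. */
static void do_cleanup(void *info)
{
}

static void cleanup_on_dirty_cpus(struct cleanup_info *ci,
				  const struct cpumask *allowed)
{
	/* wait=true: return only after every selected CPU ran do_cleanup(). */
	on_each_cpu_cond_mask(cpu_needs_cleanup, do_cleanup, ci, true,
			      GFP_KERNEL, allowed);
}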
kernel/up.c (14 changed lines)
@@ -68,9 +68,9 @@ EXPORT_SYMBOL(on_each_cpu_mask);
 * Preemption is disabled here to make sure the cond_func is called under the
 * same condtions in UP and SMP.
 */
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
-		      smp_call_func_t func, void *info, bool wait,
-		      gfp_t gfp_flags)
+void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
+			   smp_call_func_t func, void *info, bool wait,
+			   gfp_t gfp_flags, const struct cpumask *mask)
 {
	unsigned long flags;
 
@@ -82,6 +82,14 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
	}
	preempt_enable();
 }
+EXPORT_SYMBOL(on_each_cpu_cond_mask);
+
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+		      smp_call_func_t func, void *info, bool wait,
+		      gfp_t gfp_flags)
+{
+	on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags, NULL);
+}
 EXPORT_SYMBOL(on_each_cpu_cond);
 
 int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)