Merge tag 'v5.2-rc7' into rdma.git hmm
Required for dependencies in the next patches.
mm/cleancache.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Cleancache frontend
  *
@@ -7,8 +8,6 @@
  *
  * Copyright (C) 2009-2010 Oracle Corp. All rights reserved.
  * Author: Dan Magenheimer
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
  */

 #include <linux/module.h>
mm/dmapool.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * DMA Pool allocator
  *
@@ -5,10 +6,6 @@
  * Copyright 2007 Intel Corporation
  * Author: Matthew Wilcox <willy@linux.intel.com>
  *
- * This software may be redistributed and/or modified under the terms of
- * the GNU General Public License ("GPL") version 2 as published by the
- * Free Software Foundation.
- *
  * This allocator returns small blocks of a given size which are DMA-able by
  * the given device. It uses the dma_alloc_coherent page allocator to get
  * new pages, then splits them up into blocks of the required size.
mm/frontswap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Frontswap frontend
  *
@@ -7,8 +8,6 @@
  *
  * Copyright (C) 2009-2012 Oracle Corp. All rights reserved.
  * Author: Dan Magenheimer
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
  */

 #include <linux/mman.h>
mm/hmm.c
@@ -1313,9 +1313,8 @@ static void hmm_devmem_ref_release(struct percpu_ref *ref)
 	complete(&devmem->completion);
 }

-static void hmm_devmem_ref_exit(void *data)
+static void hmm_devmem_ref_exit(struct percpu_ref *ref)
 {
-	struct percpu_ref *ref = data;
 	struct hmm_devmem *devmem;

 	devmem = container_of(ref, struct hmm_devmem, ref);
@@ -1392,10 +1391,6 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
 	if (ret)
 		return ERR_PTR(ret);

-	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
-	if (ret)
-		return ERR_PTR(ret);
-
 	size = ALIGN(size, PA_SECTION_SIZE);
 	addr = min((unsigned long)iomem_resource.end,
 		   (1UL << MAX_PHYSMEM_BITS) - 1);
@@ -1434,6 +1429,7 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
 	devmem->pagemap.ref = &devmem->ref;
 	devmem->pagemap.data = devmem;
 	devmem->pagemap.kill = hmm_devmem_ref_kill;
+	devmem->pagemap.cleanup = hmm_devmem_ref_exit;

 	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
 	if (IS_ERR(result))
@@ -1471,11 +1467,6 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
 	if (ret)
 		return ERR_PTR(ret);

-	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
-			&devmem->ref);
-	if (ret)
-		return ERR_PTR(ret);
-
 	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
 	devmem->pfn_last = devmem->pfn_first +
 			   (resource_size(devmem->resource) >> PAGE_SHIFT);
@@ -1488,6 +1479,7 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
 	devmem->pagemap.ref = &devmem->ref;
 	devmem->pagemap.data = devmem;
 	devmem->pagemap.kill = hmm_devmem_ref_kill;
+	devmem->pagemap.cleanup = hmm_devmem_ref_exit;

 	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
 	if (IS_ERR(result))
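
The shape of the hmm.c change: instead of registering a separate device-managed action to tear the percpu ref down, the pagemap struct itself now carries kill/cleanup callbacks that the core invokes at the right point in teardown. A standalone C sketch of that ownership move — every type and function name here is an invented stand-in, not the real devm/memremap API:

#include <stdio.h>

struct pagemap_model {
	int ref;
	void (*kill)(struct pagemap_model *);
	void (*cleanup)(struct pagemap_model *);
};

static void ref_kill(struct pagemap_model *p)
{
	p->ref = 0;                     /* models percpu_ref_kill() */
	printf("ref killed\n");
}

static void ref_exit(struct pagemap_model *p)
{
	printf("ref exited, ref=%d\n", p->ref); /* models percpu_ref_exit() */
}

/* The core owns the ordering: kill first, then cleanup -- callers no
 * longer schedule their own exit action and risk running it too early. */
static void teardown(struct pagemap_model *p)
{
	p->kill(p);
	p->cleanup(p);
}

int main(void)
{
	struct pagemap_model pgmap = {
		.ref = 1,
		.kill = ref_kill,
		.cleanup = ref_exit,
	};

	teardown(&pgmap);
	return 0;
}
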
mm/huge_memory.c
@@ -1,8 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2009 Red Hat, Inc.
- *
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
  */

 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
mm/hugetlb.c
@@ -1510,16 +1510,29 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,

 /*
  * Dissolve a given free hugepage into free buddy pages. This function does
- * nothing for in-use (including surplus) hugepages. Returns -EBUSY if the
- * dissolution fails because a give page is not a free hugepage, or because
- * free hugepages are fully reserved.
+ * nothing for in-use hugepages and non-hugepages.
+ * This function returns values like below:
+ *
+ * -EBUSY: failed to dissolved free hugepages or the hugepage is in-use
+ *         (allocated or reserved.)
+ * 0:      successfully dissolved free hugepages or the page is not a
+ *         hugepage (considered as already dissolved)
  */
 int dissolve_free_huge_page(struct page *page)
 {
 	int rc = -EBUSY;

+	/* Not to disrupt normal path by vainly holding hugetlb_lock */
+	if (!PageHuge(page))
+		return 0;
+
 	spin_lock(&hugetlb_lock);
-	if (PageHuge(page) && !page_count(page)) {
+	if (!PageHuge(page)) {
+		rc = 0;
+		goto out;
+	}
+
+	if (!page_count(page)) {
 		struct page *head = compound_head(page);
 		struct hstate *h = page_hstate(head);
 		int nid = page_to_nid(head);
@@ -1564,11 +1577,9 @@ int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)

 	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
 		page = pfn_to_page(pfn);
-		if (PageHuge(page) && !page_count(page)) {
-			rc = dissolve_free_huge_page(page);
-			if (rc)
-				break;
-		}
+		rc = dissolve_free_huge_page(page);
+		if (rc)
+			break;
 	}

 	return rc;
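
The new contract — cheap unlocked check, recheck under the lock, and "not a hugepage" meaning success rather than -EBUSY — is what lets the caller loop above drop its own PageHuge()/page_count() peeking. A standalone C sketch of that flow, using pthreads and invented page state instead of the real kernel structures:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t hugetlb_lock = PTHREAD_MUTEX_INITIALIZER;
static bool page_is_huge = true;   /* stand-in for PageHuge(page) */
static int page_refcount;          /* stand-in for page_count(page) */

static int dissolve(void)
{
	int rc = -EBUSY;

	if (!page_is_huge)              /* fast path, no lock held */
		return 0;

	pthread_mutex_lock(&hugetlb_lock);
	if (!page_is_huge) {            /* state may have changed meanwhile */
		rc = 0;
		goto out;
	}
	if (!page_refcount) {           /* a free hugepage: dissolve it */
		page_is_huge = false;
		rc = 0;
	}
out:
	pthread_mutex_unlock(&hugetlb_lock);
	return rc;
}

int main(void)
{
	printf("free hugepage:     %d\n", dissolve());  /* 0 */
	printf("already dissolved: %d\n", dissolve());  /* 0, fast path */
	page_is_huge = true;
	page_refcount = 1;
	printf("in-use hugepage:   %d\n", dissolve());  /* -EBUSY */
	return 0;
}
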
mm/interval_tree.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * mm/interval_tree.c - interval tree for mapping->i_mmap
  *
  * Copyright (C) 2012, Michel Lespinasse <walken@google.com>
- *
- * This file is released under the GPL v2.
  */

 #include <linux/mm.h>
mm/khugepaged.c
@@ -1004,6 +1004,9 @@ static void collapse_huge_page(struct mm_struct *mm,
 	 * handled by the anon_vma lock + PG_lock.
 	 */
 	down_write(&mm->mmap_sem);
+	result = SCAN_ANY_PROCESS;
+	if (!mmget_still_valid(mm))
+		goto out;
 	result = hugepage_vma_revalidate(mm, address, &vma);
 	if (result)
 		goto out;
mm/kmemleak-test.c
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * mm/kmemleak-test.c
  *
  * Copyright (C) 2008 ARM Limited
  * Written by Catalin Marinas <catalin.marinas@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */

 #define pr_fmt(fmt) "kmemleak: " fmt
mm/kmemleak.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * mm/kmemleak.c
  *
  * Copyright (C) 2008 ARM Limited
  * Written by Catalin Marinas <catalin.marinas@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
  *
  * For more information on the algorithm and kmemleak usage, please see
  * Documentation/dev-tools/kmemleak.rst.
 *
mm/ksm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Memory merging support.
  *
@@ -10,8 +11,6 @@
  *	Andrea Arcangeli
  *	Chris Wright
  *	Hugh Dickins
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
  */

 #include <linux/errno.h>
mm/list_lru.c
@@ -354,7 +354,7 @@ static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
 	}
 	return 0;
 fail:
-	__memcg_destroy_list_lru_node(memcg_lrus, begin, i - 1);
+	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
 	return -ENOMEM;
 }

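
The one-character fix reads naturally if the destroy helper treats its second bound as exclusive: after a failure at index i, slots [begin, i) are populated, so passing i - 1 as the exclusive end leaks the last one. A userspace model of that off-by-one — the slot array and helper names are invented, not the kernel's data structures:

#include <stdio.h>
#include <stdlib.h>

#define NR_SLOTS 8

/* Frees slots [begin, end); end is exclusive, as the fix implies for
 * __memcg_destroy_list_lru_node(). */
static void destroy_range(void **slots, int begin, int end)
{
	for (int i = begin; i < end; i++) {
		free(slots[i]);
		slots[i] = NULL;
	}
}

int main(void)
{
	void *slots[NR_SLOTS] = { 0 };
	int i;

	for (i = 0; i < NR_SLOTS; i++) {
		if (i == 5)             /* simulate an allocation failure here */
			break;
		slots[i] = malloc(32);
	}
	/* Slots [0, i) hold memory.  Passing i - 1 as the exclusive bound,
	 * as the old code did, would leak slots[i - 1]. */
	destroy_range(slots, 0, i);
	printf("cleaned up slots [0, %d)\n", i);
	return 0;
}
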
mm/memcontrol.c
@@ -691,11 +691,12 @@ void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
 	if (mem_cgroup_disabled())
 		return;

+	__this_cpu_add(memcg->vmstats_local->stat[idx], val);
+
 	x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
 	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
 		struct mem_cgroup *mi;

-		atomic_long_add(x, &memcg->vmstats_local[idx]);
 		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
 			atomic_long_add(x, &mi->vmstats[idx]);
 		x = 0;
@@ -745,11 +746,12 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 	__mod_memcg_state(memcg, idx, val);

 	/* Update lruvec */
+	__this_cpu_add(pn->lruvec_stat_local->count[idx], val);
+
 	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
 	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
 		struct mem_cgroup_per_node *pi;

-		atomic_long_add(x, &pn->lruvec_stat_local[idx]);
 		for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
 			atomic_long_add(x, &pi->lruvec_stat[idx]);
 		x = 0;
@@ -771,11 +773,12 @@ void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
 	if (mem_cgroup_disabled())
 		return;

+	__this_cpu_add(memcg->vmstats_local->events[idx], count);
+
 	x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
 	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
 		struct mem_cgroup *mi;

-		atomic_long_add(x, &memcg->vmevents_local[idx]);
 		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
 			atomic_long_add(x, &mi->vmevents[idx]);
 		x = 0;
@@ -790,7 +793,12 @@ static unsigned long memcg_events(struct mem_cgroup *memcg, int event)

 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
 {
-	return atomic_long_read(&memcg->vmevents_local[event]);
+	long x = 0;
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		x += per_cpu(memcg->vmstats_local->events[event], cpu);
+	return x;
 }

 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
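
The pattern these hunks move to: writers bump a plain per-CPU counter on the hot path (no shared atomic), and readers pay for it by summing over all CPUs, as the new memcg_events_local() does with for_each_possible_cpu(). A toy single-file model of that trade-off — the array stands in for the real percpu allocation, names are borrowed for readability only:

#include <stdio.h>

#define NR_CPUS 4

static long vmstats_local[NR_CPUS];  /* models the percpu area */

static void mod_state(int cpu, long val)
{
	vmstats_local[cpu] += val;   /* models __this_cpu_add(): cheap, local */
}

static long read_state(void)
{
	long x = 0;

	/* Reads are the expensive side: sum every CPU's slot. */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		x += vmstats_local[cpu];
	return x;
}

int main(void)
{
	mod_state(0, 1);
	mod_state(1, 2);
	mod_state(3, -1);
	printf("local view: %ld\n", read_state());   /* prints 2 */
	return 0;
}
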
@@ -2191,11 +2199,9 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu)
 			long x;

 			x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0);
-			if (x) {
-				atomic_long_add(x, &memcg->vmstats_local[i]);
+			if (x)
 				for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
 					atomic_long_add(x, &memcg->vmstats[i]);
-			}

 			if (i >= NR_VM_NODE_STAT_ITEMS)
 				continue;
@@ -2205,12 +2211,10 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu)

 			pn = mem_cgroup_nodeinfo(memcg, nid);
 			x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
-			if (x) {
-				atomic_long_add(x, &pn->lruvec_stat_local[i]);
+			if (x)
 				do {
 					atomic_long_add(x, &pn->lruvec_stat[i]);
 				} while ((pn = parent_nodeinfo(pn, nid)));
-			}
 		}
 	}
@@ -2218,11 +2222,9 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu)
 		long x;

 		x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0);
-		if (x) {
-			atomic_long_add(x, &memcg->vmevents_local[i]);
+		if (x)
 			for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
 				atomic_long_add(x, &memcg->vmevents[i]);
-		}
 	}
 }
@@ -4483,8 +4485,15 @@ static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
 	if (!pn)
 		return 1;

+	pn->lruvec_stat_local = alloc_percpu(struct lruvec_stat);
+	if (!pn->lruvec_stat_local) {
+		kfree(pn);
+		return 1;
+	}
+
 	pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat);
 	if (!pn->lruvec_stat_cpu) {
+		free_percpu(pn->lruvec_stat_local);
 		kfree(pn);
 		return 1;
 	}
@@ -4506,6 +4515,7 @@ static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
 		return;

 	free_percpu(pn->lruvec_stat_cpu);
+	free_percpu(pn->lruvec_stat_local);
 	kfree(pn);
 }
@@ -4516,6 +4526,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
 	for_each_node(node)
 		free_mem_cgroup_per_node_info(memcg, node);
 	free_percpu(memcg->vmstats_percpu);
+	free_percpu(memcg->vmstats_local);
 	kfree(memcg);
 }
@@ -4544,6 +4555,10 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
 	if (memcg->id.id < 0)
 		goto fail;

+	memcg->vmstats_local = alloc_percpu(struct memcg_vmstats_percpu);
+	if (!memcg->vmstats_local)
+		goto fail;
+
 	memcg->vmstats_percpu = alloc_percpu(struct memcg_vmstats_percpu);
 	if (!memcg->vmstats_percpu)
 		goto fail;
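
The allocation hunks all follow one pairing rule: each new percpu area (vmstats_local, lruvec_stat_local) needs a matching free both on the allocator's error path and in the normal teardown path. A self-contained model of that discipline with plain malloc/free standing in for alloc_percpu()/free_percpu() — struct and field names are copied only for readability:

#include <stdio.h>
#include <stdlib.h>

struct memcg_model {
	long *vmstats_local;
	long *vmstats_percpu;
};

static struct memcg_model *memcg_alloc(void)
{
	struct memcg_model *m = calloc(1, sizeof(*m));

	if (!m)
		return NULL;
	m->vmstats_local = calloc(64, sizeof(long));   /* models alloc_percpu() */
	if (!m->vmstats_local)
		goto fail;
	m->vmstats_percpu = calloc(64, sizeof(long));
	if (!m->vmstats_percpu)
		goto fail;
	return m;
fail:
	free(m->vmstats_local);     /* free(NULL) is a safe no-op */
	free(m);
	return NULL;
}

static void memcg_free(struct memcg_model *m)
{
	free(m->vmstats_percpu);
	free(m->vmstats_local);
	free(m);
}

int main(void)
{
	struct memcg_model *m = memcg_alloc();

	if (!m)
		return 1;
	printf("both areas allocated, tearing down\n");
	memcg_free(m);
	return 0;
}
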
mm/memory-failure.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2008, 2009 Intel Corporation
  * Authors: Andi Kleen, Fengguang Wu
  *
- * This software may be redistributed and/or modified under the terms of
- * the GNU General Public License ("GPL") version 2 only as published by the
- * Free Software Foundation.
- *
  * High level machine check handler. Handles pages reported by the
  * hardware as being corrupted usually due to a multi-bit ECC memory or cache
  * failure.
@@ -1733,6 +1730,8 @@ static int soft_offline_huge_page(struct page *page, int flags)
 		if (!ret) {
 			if (set_hwpoison_free_buddy_page(page))
 				num_poisoned_pages_inc();
+			else
+				ret = -EBUSY;
 		}
 	}
 	return ret;
@@ -1857,11 +1856,8 @@ static int soft_offline_in_use_page(struct page *page, int flags)

 static int soft_offline_free_page(struct page *page)
 {
-	int rc = 0;
-	struct page *head = compound_head(page);
+	int rc = dissolve_free_huge_page(page);

-	if (PageHuge(head))
-		rc = dissolve_free_huge_page(page);
 	if (!rc) {
 		if (set_hwpoison_free_buddy_page(page))
 			num_poisoned_pages_inc();
mm/mempolicy.c
@@ -306,7 +306,7 @@ static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
 	else {
 		nodes_remap(tmp, pol->v.nodes,pol->w.cpuset_mems_allowed,
 								*nodes);
-		pol->w.cpuset_mems_allowed = tmp;
+		pol->w.cpuset_mems_allowed = *nodes;
 	}

 	if (nodes_empty(tmp))
mm/mlock.c
@@ -636,11 +636,11 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
  * is also counted.
  * Return value: previously mlocked page counts
  */
-static int count_mm_mlocked_page_nr(struct mm_struct *mm,
+static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
 		unsigned long start, size_t len)
 {
 	struct vm_area_struct *vma;
-	int count = 0;
+	unsigned long count = 0;

 	if (mm == NULL)
 		mm = current->mm;
@@ -797,7 +797,8 @@ SYSCALL_DEFINE1(mlockall, int, flags)
 	unsigned long lock_limit;
 	int ret;

-	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)))
+	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)) ||
+	    flags == MCL_ONFAULT)
 		return -EINVAL;

 	if (!can_do_mlock())
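
The user-visible effect of the mlockall() hunk is easy to check from user space: MCL_ONFAULT alone has nothing to apply to, so it now returns EINVAL instead of silently locking nothing, while the meaningful MCL_CURRENT | MCL_ONFAULT combination still works. A small test program against the documented API (needs a kernel with MCL_ONFAULT, i.e. 4.4+):

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* Bare MCL_ONFAULT is rejected with this change. */
	if (mlockall(MCL_ONFAULT) == -1)
		printf("mlockall(MCL_ONFAULT): %s\n", strerror(errno));

	/* Lock current mappings, but only page by page on first touch. */
	if (mlockall(MCL_CURRENT | MCL_ONFAULT) == 0) {
		printf("MCL_CURRENT | MCL_ONFAULT succeeded\n");
		munlockall();
	} else {
		printf("mlockall: %s (may need CAP_IPC_LOCK or a higher "
		       "RLIMIT_MEMLOCK)\n", strerror(errno));
	}
	return 0;
}
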
mm/mmu_gather.c
@@ -245,14 +245,28 @@ void tlb_finish_mmu(struct mmu_gather *tlb,
 {
 	/*
 	 * If there are parallel threads are doing PTE changes on same range
-	 * under non-exclusive lock(e.g., mmap_sem read-side) but defer TLB
-	 * flush by batching, a thread has stable TLB entry can fail to flush
-	 * the TLB by observing pte_none|!pte_dirty, for example so flush TLB
-	 * forcefully if we detect parallel PTE batching threads.
+	 * under non-exclusive lock (e.g., mmap_sem read-side) but defer TLB
+	 * flush by batching, one thread may end up seeing inconsistent PTEs
+	 * and result in having stale TLB entries.  So flush TLB forcefully
+	 * if we detect parallel PTE batching threads.
+	 *
+	 * However, some syscalls, e.g. munmap(), may free page tables, this
+	 * needs force flush everything in the given range. Otherwise this
+	 * may result in having stale TLB entries for some architectures,
+	 * e.g. aarch64, that could specify flush what level TLB.
 	 */
 	if (mm_tlb_flush_nested(tlb->mm)) {
+		/*
+		 * The aarch64 yields better performance with fullmm by
+		 * avoiding multiple CPUs spamming TLBI messages at the
+		 * same time.
+		 *
+		 * On x86 non-fullmm doesn't yield significant difference
+		 * against fullmm.
+		 */
+		tlb->fullmm = 1;
 		__tlb_reset_range(tlb);
-		__tlb_adjust_range(tlb, start, end - start);
+		tlb->freed_tables = 1;
 	}

 	tlb_flush_mmu(tlb);
mm/mmu_notifier.c
@@ -1,12 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  linux/mm/mmu_notifier.c
  *
  *  Copyright (C) 2008  Qumranet, Inc.
  *  Copyright (C) 2008  SGI
  *             Christoph Lameter <cl@linux.com>
- *
- *  This work is licensed under the terms of the GNU GPL, version 2. See
- *  the COPYING file in the top-level directory.
  */

 #include <linux/rculist.h>
mm/oom_kill.c
@@ -987,8 +987,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
 /*
  * Determines whether the kernel must panic because of the panic_on_oom sysctl.
  */
-static void check_panic_on_oom(struct oom_control *oc,
-			       enum oom_constraint constraint)
+static void check_panic_on_oom(struct oom_control *oc)
 {
 	if (likely(!sysctl_panic_on_oom))
 		return;
@@ -998,7 +997,7 @@ static void check_panic_on_oom(struct oom_control *oc,
 	 * does not panic for cpuset, mempolicy, or memcg allocation
 	 * failures.
 	 */
-	if (constraint != CONSTRAINT_NONE)
+	if (oc->constraint != CONSTRAINT_NONE)
 		return;
 	}
 	/* Do not panic for oom kills triggered by sysrq */
@@ -1035,7 +1034,6 @@ EXPORT_SYMBOL_GPL(unregister_oom_notifier);
 bool out_of_memory(struct oom_control *oc)
 {
 	unsigned long freed = 0;
-	enum oom_constraint constraint = CONSTRAINT_NONE;

 	if (oom_killer_disabled)
 		return false;
@@ -1071,10 +1069,10 @@ bool out_of_memory(struct oom_control *oc)
 	 * Check if there were limitations on the allocation (only relevant for
 	 * NUMA and memcg) that may require different handling.
 	 */
-	constraint = constrained_alloc(oc);
-	if (constraint != CONSTRAINT_MEMORY_POLICY)
+	oc->constraint = constrained_alloc(oc);
+	if (oc->constraint != CONSTRAINT_MEMORY_POLICY)
 		oc->nodemask = NULL;
-	check_panic_on_oom(oc, constraint);
+	check_panic_on_oom(oc);

 	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
 	    current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) &&
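
The refactor here is purely structural: the computed constraint now travels inside the control struct, so helpers take one argument instead of two. A standalone sketch of that shape — the enum values mirror mm/oom_kill.c for readability, but everything below is a model, not kernel code:

#include <stdio.h>

enum oom_constraint_model {
	CONSTRAINT_NONE,
	CONSTRAINT_CPUSET,
	CONSTRAINT_MEMORY_POLICY,
	CONSTRAINT_MEMCG,
};

struct oom_control_model {
	enum oom_constraint_model constraint;
};

/* No second parameter: the helper reads the constraint from the struct. */
static void check_panic(const struct oom_control_model *oc)
{
	if (oc->constraint != CONSTRAINT_NONE) {
		printf("constrained allocation, not panicking\n");
		return;
	}
	printf("global OOM, panic_on_oom would apply\n");
}

int main(void)
{
	struct oom_control_model oc = { .constraint = CONSTRAINT_NONE };

	oc.constraint = CONSTRAINT_MEMORY_POLICY;  /* result of the analysis step */
	check_panic(&oc);
	return 0;
}
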
mm/page_idle.c
@@ -136,7 +136,7 @@ static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,

 	end_pfn = pfn + count * BITS_PER_BYTE;
 	if (end_pfn > max_pfn)
-		end_pfn = ALIGN(max_pfn, BITMAP_CHUNK_BITS);
+		end_pfn = max_pfn;

 	for (; pfn < end_pfn; pfn++) {
 		bit = pfn % BITMAP_CHUNK_BITS;
@@ -181,7 +181,7 @@ static ssize_t page_idle_bitmap_write(struct file *file, struct kobject *kobj,

 	end_pfn = pfn + count * BITS_PER_BYTE;
 	if (end_pfn > max_pfn)
-		end_pfn = ALIGN(max_pfn, BITMAP_CHUNK_BITS);
+		end_pfn = max_pfn;

 	for (; pfn < end_pfn; pfn++) {
 		bit = pfn % BITMAP_CHUNK_BITS;
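
For context, the bitmap these two hunks clamp is the one user space reads and writes through sysfs (see Documentation/admin-guide/mm/idle_page_tracking.rst). A minimal reader of the documented interface — nothing here is specific to the fix itself, and it typically needs root plus CONFIG_IDLE_PAGE_TRACKING:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDONLY);
	uint64_t chunk;
	ssize_t n;

	if (fd < 0) {
		perror("open /sys/kernel/mm/page_idle/bitmap");
		return 1;
	}
	/* The bitmap is addressed in 8-byte chunks, one bit per page
	 * frame; offset 0 covers PFNs 0..63. */
	n = pread(fd, &chunk, sizeof(chunk), 0);
	if (n == (ssize_t)sizeof(chunk))
		printf("idle bits for PFNs 0..63: 0x%016llx\n",
		       (unsigned long long)chunk);
	else
		perror("pread");
	close(fd);
	return 0;
}
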
mm/page_io.c
@@ -29,10 +29,9 @@
 static struct bio *get_swap_bio(gfp_t gfp_flags,
 				struct page *page, bio_end_io_t end_io)
 {
-	int i, nr = hpage_nr_pages(page);
 	struct bio *bio;

-	bio = bio_alloc(gfp_flags, nr);
+	bio = bio_alloc(gfp_flags, 1);
 	if (bio) {
 		struct block_device *bdev;

@@ -41,9 +40,7 @@ static struct bio *get_swap_bio(gfp_t gfp_flags,
 		bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
 		bio->bi_end_io = end_io;

-		for (i = 0; i < nr; i++)
-			bio_add_page(bio, page + i, PAGE_SIZE, 0);
-		VM_BUG_ON(bio->bi_iter.bi_size != PAGE_SIZE * nr);
+		bio_add_page(bio, page, PAGE_SIZE * hpage_nr_pages(page), 0);
 	}
 	return bio;
 }
mm/percpu-km.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * mm/percpu-km.c - kernel memory based chunk allocation
  *
  * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
  *
- * This file is released under the GPLv2.
- *
 * Chunks are allocated as a contiguous kernel memory using gfp
 * allocation. This is to be used on nommu architectures.
 *
mm/percpu-stats.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * mm/percpu-debug.c
  *
  * Copyright (C) 2017		Facebook Inc.
  * Copyright (C) 2017		Dennis Zhou <dennisz@fb.com>
  *
- * This file is released under the GPLv2.
- *
  * Prints statistics about the percpu allocator and backing chunks.
  */
 #include <linux/debugfs.h>
mm/percpu-vm.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * mm/percpu-vm.c - vmalloc area based chunk allocation
  *
  * Copyright (C) 2010		SUSE Linux Products GmbH
  * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
  *
- * This file is released under the GPLv2.
- *
  * Chunks are mapped into vmalloc areas and populated page by page.
  * This is the default chunk allocator.
  */
mm/percpu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * mm/percpu.c - percpu memory allocator
  *
@@ -7,8 +8,6 @@
  * Copyright (C) 2017		Facebook Inc.
  * Copyright (C) 2017		Dennis Zhou <dennisszhou@gmail.com>
  *
- * This file is released under the GPLv2 license.
- *
  * The percpu allocator handles both static and dynamic areas.  Percpu
  * areas are allocated in chunks which are divided into units.  There is
  * a 1-to-1 mapping for units to possible cpus.  These units are grouped
mm/rodata_test.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * rodata_test.c: functional test for mark_rodata_ro function
  *
  * (C) Copyright 2008 Intel Corporation
  * Author: Arjan van de Ven <arjan@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
  */
 #define pr_fmt(fmt) "rodata_test: " fmt

mm/usercopy.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
  * which are designed to protect kernel memory from needless exposure
@@ -6,11 +7,6 @@
  *
  * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
  * Security Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

mm/userfaultfd.c
@@ -1,10 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  mm/userfaultfd.c
  *
  *  Copyright (C) 2015  Red Hat, Inc.
- *
- *  This work is licensed under the terms of the GNU GPL, version 2. See
- *  the COPYING file in the top-level directory.
  */

 #include <linux/mm.h>
mm/vmalloc.c
@@ -913,7 +913,7 @@ adjust_va_to_fit_type(struct vmap_area *va,
 		unsigned long nva_start_addr, unsigned long size,
 		enum fit_type type)
 {
-	struct vmap_area *lva;
+	struct vmap_area *lva = NULL;

 	if (type == FL_FIT_TYPE) {
 		/*
@@ -972,7 +972,7 @@ adjust_va_to_fit_type(struct vmap_area *va,
 	if (type != FL_FIT_TYPE) {
 		augment_tree_propagate_from(va);

-		if (type == NE_FIT_TYPE)
+		if (lva)	/* type == NE_FIT_TYPE */
 			insert_vmap_area_augment(lva, &va->rb_node,
 				&free_vmap_area_root, &free_vmap_area_list);
 	}
@@ -2123,9 +2123,9 @@ static inline void set_area_direct_map(const struct vm_struct *area,
 /* Handle removing and resetting vm mappings related to the vm_struct. */
 static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
 {
-	unsigned long addr = (unsigned long)area->addr;
 	unsigned long start = ULONG_MAX, end = 0;
 	int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
+	int flush_dmap = 0;
 	int i;

 	/*
@@ -2135,8 +2135,8 @@ static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
 	 * execute permissions, without leaving a RW+X window.
 	 */
 	if (flush_reset && !IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
-		set_memory_nx(addr, area->nr_pages);
-		set_memory_rw(addr, area->nr_pages);
+		set_memory_nx((unsigned long)area->addr, area->nr_pages);
+		set_memory_rw((unsigned long)area->addr, area->nr_pages);
 	}

 	remove_vm_area(area->addr);
@@ -2160,9 +2160,11 @@ static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
 	 * the vm_unmap_aliases() flush includes the direct map.
 	 */
 	for (i = 0; i < area->nr_pages; i++) {
-		if (page_address(area->pages[i])) {
+		unsigned long addr = (unsigned long)page_address(area->pages[i]);
+		if (addr) {
 			start = min(addr, start);
-			end = max(addr, end);
+			end = max(addr + PAGE_SIZE, end);
+			flush_dmap = 1;
 		}
 	}
@@ -2172,7 +2174,7 @@ static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
 	 * reset the direct map permissions to the default.
 	 */
 	set_area_direct_map(area, set_direct_map_invalid_noflush);
-	_vm_unmap_aliases(start, end, 1);
+	_vm_unmap_aliases(start, end, flush_dmap);
 	set_area_direct_map(area, set_direct_map_default_noflush);
 }

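
The range arithmetic in the vm_remove_mappings() hunk is worth spelling out: a flush range covering a page must end at addr + PAGE_SIZE, otherwise the last page escapes the flush, and no flush should be requested at all when no page had a direct mapping. A standalone sketch with made-up addresses (0 stands for a page with no direct mapping):

#include <limits.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long pages[] = { 0x10000, 0, 0x2a000, 0x13000 };
	unsigned long start = ULONG_MAX, end = 0;
	int flush_dmap = 0;

	for (int i = 0; i < 4; i++) {
		unsigned long addr = pages[i];

		if (addr) {
			start = addr < start ? addr : start;
			/* the fix: extend to the end of the page */
			end = addr + PAGE_SIZE > end ? addr + PAGE_SIZE : end;
			flush_dmap = 1;
		}
	}
	if (flush_dmap)
		printf("flush [%#lx, %#lx)\n", start, end);  /* [0x10000, 0x2b000) */
	return 0;
}
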
mm/vmpressure.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Linux VM pressure
  *
@@ -6,10 +7,6 @@
  *
  * Based on ideas from Andrew Morton, David Rientjes, KOSAKI Motohiro,
  * Leonid Moiseichuk, Mel Gorman, Minchan Kim and Pekka Enberg.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
  */

 #include <linux/cgroup.h>
mm/vmscan.c
@@ -1505,7 +1505,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,

 	list_for_each_entry_safe(page, next, page_list, lru) {
 		if (page_is_file_cache(page) && !PageDirty(page) &&
-		    !__PageMovable(page)) {
+		    !__PageMovable(page) && !PageUnevictable(page)) {
 			ClearPageActive(page);
 			list_move(&page->lru, &clean_pages);
 		}
@@ -1953,8 +1953,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	if (global_reclaim(sc))
 		__count_vm_events(item, nr_reclaimed);
 	__count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
-	reclaim_stat->recent_rotated[0] = stat.nr_activate[0];
-	reclaim_stat->recent_rotated[1] = stat.nr_activate[1];
+	reclaim_stat->recent_rotated[0] += stat.nr_activate[0];
+	reclaim_stat->recent_rotated[1] += stat.nr_activate[1];

 	move_pages_to_lru(lruvec, &page_list);