Merge branch 'linus' into sched/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -42,9 +42,11 @@ const struct trace_print_flags vmaflag_names[] = {
 
 void __dump_page(struct page *page, const char *reason)
 {
+	int mapcount = PageSlab(page) ? 0 : page_mapcount(page);
+
 	pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx",
-		  page, page_ref_count(page), page_mapcount(page),
-		  page->mapping, page->index);
+		  page, page_ref_count(page), mapcount,
+		  page->mapping, page_to_pgoff(page));
 	if (PageCompound(page))
 		pr_cont(" compound_mapcount: %d", compound_mapcount(page));
 	pr_cont("\n");
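The hunk above makes __dump_page() report mapcount as 0 for slab pages instead of calling page_mapcount() on them, and switches the printed index to page_to_pgoff(). A minimal userspace analogue of that "only read the field when it is meaningful for this page type" guard is sketched below; the struct and names are hypothetical illustrations, not kernel code.

#include <stdio.h>

/* Hypothetical stand-in for struct page: the same storage is reused for
 * different purposes depending on the page type, so a field is only
 * meaningful for some types. */
struct fake_page {
	int is_slab;      /* when set, 'mapcount' holds unrelated data */
	int mapcount;     /* meaningful only when !is_slab */
};

static void dump_fake_page(const struct fake_page *page)
{
	/* Mirror the guard in the hunk: report 0 instead of reading a
	 * field that is reused for something else on slab pages. */
	int mapcount = page->is_slab ? 0 : page->mapcount;

	printf("page:%p mapcount:%d%s\n", (const void *)page, mapcount,
	       page->is_slab ? " (slab)" : "");
}

int main(void)
{
	struct fake_page anon = { .is_slab = 0, .mapcount = 3 };
	struct fake_page slab = { .is_slab = 1, .mapcount = 12345 };

	dump_fake_page(&anon);
	dump_fake_page(&slab);
	return 0;
}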
@@ -1078,7 +1078,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 		goto out;
 
 	page = pmd_page(*pmd);
-	VM_BUG_ON_PAGE(!PageHead(page), page);
+	VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
 	if (flags & FOLL_TOUCH)
 		touch_pmd(vma, addr, pmd);
 	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
@@ -1116,7 +1116,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 	}
 skip_mlock:
 	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
-	VM_BUG_ON_PAGE(!PageCompound(page), page);
+	VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
 	if (flags & FOLL_GET)
 		get_page(page);
 
@@ -838,7 +838,8 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
  * value (scan code).
  */
 
-static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address)
+static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
+		struct vm_area_struct **vmap)
 {
 	struct vm_area_struct *vma;
 	unsigned long hstart, hend;
@@ -846,7 +847,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address)
 	if (unlikely(khugepaged_test_exit(mm)))
 		return SCAN_ANY_PROCESS;
 
-	vma = find_vma(mm, address);
+	*vmap = vma = find_vma(mm, address);
 	if (!vma)
 		return SCAN_VMA_NULL;
 
@@ -881,6 +882,11 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 		.pmd = pmd,
 	};
 
+	/* we only decide to swapin, if there is enough young ptes */
+	if (referenced < HPAGE_PMD_NR/2) {
+		trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
+		return false;
+	}
 	fe.pte = pte_offset_map(pmd, address);
 	for (; fe.address < address + HPAGE_PMD_NR*PAGE_SIZE;
 			fe.pte++, fe.address += PAGE_SIZE) {
@@ -888,17 +894,12 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 		if (!is_swap_pte(pteval))
 			continue;
 		swapped_in++;
-		/* we only decide to swapin, if there is enough young ptes */
-		if (referenced < HPAGE_PMD_NR/2) {
-			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
-			return false;
-		}
 		ret = do_swap_page(&fe, pteval);
 
 		/* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
 		if (ret & VM_FAULT_RETRY) {
 			down_read(&mm->mmap_sem);
-			if (hugepage_vma_revalidate(mm, address)) {
+			if (hugepage_vma_revalidate(mm, address, &fe.vma)) {
 				/* vma is no longer available, don't continue to swapin */
 				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
 				return false;
@@ -923,7 +924,6 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 static void collapse_huge_page(struct mm_struct *mm,
 				   unsigned long address,
 				   struct page **hpage,
-				   struct vm_area_struct *vma,
 				   int node, int referenced)
 {
 	pmd_t *pmd, _pmd;
@@ -933,6 +933,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	spinlock_t *pmd_ptl, *pte_ptl;
 	int isolated = 0, result = 0;
 	struct mem_cgroup *memcg;
+	struct vm_area_struct *vma;
 	unsigned long mmun_start;	/* For mmu_notifiers */
 	unsigned long mmun_end;		/* For mmu_notifiers */
 	gfp_t gfp;
@@ -961,7 +962,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	}
 
 	down_read(&mm->mmap_sem);
-	result = hugepage_vma_revalidate(mm, address);
+	result = hugepage_vma_revalidate(mm, address, &vma);
 	if (result) {
 		mem_cgroup_cancel_charge(new_page, memcg, true);
 		up_read(&mm->mmap_sem);
@@ -994,7 +995,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	 * handled by the anon_vma lock + PG_lock.
 	 */
 	down_write(&mm->mmap_sem);
-	result = hugepage_vma_revalidate(mm, address);
+	result = hugepage_vma_revalidate(mm, address, &vma);
 	if (result)
 		goto out;
 	/* check if the pmd is still valid */
@@ -1202,7 +1203,7 @@ out_unmap:
 	if (ret) {
 		node = khugepaged_find_target_node();
 		/* collapse_huge_page will return with the mmap_sem released */
-		collapse_huge_page(mm, address, hpage, vma, node, referenced);
+		collapse_huge_page(mm, address, hpage, node, referenced);
 	}
 out:
 	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
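The khugepaged hunks above add a struct vm_area_struct **vmap out-parameter to hugepage_vma_revalidate(), so callers such as collapse_huge_page() and __collapse_huge_page_swapin() get the freshly looked-up VMA back after mmap_sem has been dropped and retaken, instead of continuing to use a possibly stale pointer. A simplified userspace sketch of that revalidate-and-return pattern, with hypothetical types and names, is below.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel objects involved. */
struct fake_vma { unsigned long start, end; };
struct fake_mm  { struct fake_vma vma; int alive; };

enum scan_result { SCAN_SUCCEED, SCAN_ANY_PROCESS, SCAN_VMA_NULL };

/* Pattern from the diff: after the lock was dropped and retaken, look the
 * object up again and hand it back through an out-parameter instead of
 * letting the caller keep its old pointer. */
static enum scan_result revalidate(struct fake_mm *mm, unsigned long addr,
				   struct fake_vma **vmap)
{
	if (!mm->alive)
		return SCAN_ANY_PROCESS;

	*vmap = (addr >= mm->vma.start && addr < mm->vma.end) ? &mm->vma : NULL;
	if (!*vmap)
		return SCAN_VMA_NULL;
	return SCAN_SUCCEED;
}

int main(void)
{
	struct fake_mm mm = { .vma = { 0x1000, 0x2000 }, .alive = 1 };
	struct fake_vma *vma = NULL;

	/* The caller only touches 'vma' after a successful revalidation. */
	if (revalidate(&mm, 0x1800, &vma) == SCAN_SUCCEED)
		printf("vma [%#lx, %#lx)\n", vma->start, vma->end);
	return 0;
}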
@@ -1740,17 +1740,22 @@ static DEFINE_MUTEX(percpu_charge_mutex);
 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
 	struct memcg_stock_pcp *stock;
+	unsigned long flags;
 	bool ret = false;
 
 	if (nr_pages > CHARGE_BATCH)
 		return ret;
 
-	stock = &get_cpu_var(memcg_stock);
+	local_irq_save(flags);
+
+	stock = this_cpu_ptr(&memcg_stock);
 	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
 		stock->nr_pages -= nr_pages;
 		ret = true;
 	}
-	put_cpu_var(memcg_stock);
+
+	local_irq_restore(flags);
+
 	return ret;
 }
 
@@ -1771,15 +1776,18 @@ static void drain_stock(struct memcg_stock_pcp *stock)
 	stock->cached = NULL;
 }
 
-/*
- * This must be called under preempt disabled or must be called by
- * a thread which is pinned to local cpu.
- */
 static void drain_local_stock(struct work_struct *dummy)
 {
-	struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
+	struct memcg_stock_pcp *stock;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	stock = this_cpu_ptr(&memcg_stock);
 	drain_stock(stock);
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
+
+	local_irq_restore(flags);
 }
 
 /*
@@ -1788,14 +1796,19 @@ static void drain_local_stock(struct work_struct *dummy)
  */
 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
-	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
+	struct memcg_stock_pcp *stock;
+	unsigned long flags;
+
+	local_irq_save(flags);
 
+	stock = this_cpu_ptr(&memcg_stock);
 	if (stock->cached != memcg) { /* reset if necessary */
 		drain_stock(stock);
 		stock->cached = memcg;
 	}
 	stock->nr_pages += nr_pages;
-	put_cpu_var(memcg_stock);
+
+	local_irq_restore(flags);
 }
 
 /*
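The memcg per-CPU stock hunks above replace the get_cpu_var()/put_cpu_var() pairs (which only disable preemption) with local_irq_save()/local_irq_restore() around this_cpu_ptr(&memcg_stock), so the stock update cannot be interrupted on the local CPU while it is in flight. A rough userspace analogue of that "shut out interference, then operate on the local copy" pattern is sketched below; signal blocking stands in for interrupt disabling and a single global stands in for the per-CPU variable, so this is only an illustration, not kernel code.

#include <signal.h>
#include <stdio.h>

/* Stand-in for the per-CPU stock; in the kernel this would be reached
 * through this_cpu_ptr(&memcg_stock). */
static unsigned long stock_nr_pages = 64;

static int consume_stock(unsigned long nr_pages)
{
	sigset_t all, old;
	int ret = 0;

	/* Analogue of local_irq_save(flags): block asynchronous
	 * interference for the duration of the update. */
	sigfillset(&all);
	sigprocmask(SIG_BLOCK, &all, &old);

	if (stock_nr_pages >= nr_pages) {
		stock_nr_pages -= nr_pages;
		ret = 1;
	}

	/* Analogue of local_irq_restore(flags). */
	sigprocmask(SIG_SETMASK, &old, NULL);
	return ret;
}

int main(void)
{
	printf("consumed: %d, left: %lu\n", consume_stock(16), stock_nr_pages);
	return 0;
}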
@@ -1567,7 +1567,9 @@ static struct page *new_node_page(struct page *page, unsigned long private,
 		return alloc_huge_page_node(page_hstate(compound_head(page)),
 					next_node_in(nid, nmask));
 
-	node_clear(nid, nmask);
+	if (nid != next_node_in(nid, nmask))
+		node_clear(nid, nmask);
+
 	if (PageHighMem(page)
 	    || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
 		gfp_mask |= __GFP_HIGHMEM;
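The new_node_page() hunk above only clears the source node from the candidate mask when next_node_in(nid, nmask) returns a different node; if nid is the only node in the mask, next_node_in() wraps back to nid and the clear is skipped, so the mask cannot end up empty. A tiny userspace sketch of that guard, using a plain bitmask and a simplified next_node_in() in place of nodemask_t, is below.

#include <stdio.h>

/* Simplified next_node_in() over an 8-bit mask: return the next set bit
 * after nid, wrapping around; returns nid itself when it is the only
 * bit set. */
static int next_node_in(int nid, unsigned int mask)
{
	for (int i = 1; i <= 8; i++) {
		int cand = (nid + i) % 8;

		if (mask & (1u << cand))
			return cand;
	}
	return nid;
}

int main(void)
{
	unsigned int nmask = 1u << 2;	/* only node 2 is allowed */
	int nid = 2;

	/* The fix: never clear the only remaining node. */
	if (nid != next_node_in(nid, nmask))
		nmask &= ~(1u << nid);

	printf("mask after: %#x (still non-empty)\n", nmask);
	return 0;
}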
@@ -264,6 +264,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
 	int ret;
 	struct swap_info_struct *sis = page_swap_info(page);
 
+	BUG_ON(!PageSwapCache(page));
 	if (sis->flags & SWP_FILE) {
 		struct kiocb kiocb;
 		struct file *swap_file = sis->swap_file;
@@ -337,6 +338,7 @@ int swap_readpage(struct page *page)
 	int ret = 0;
 	struct swap_info_struct *sis = page_swap_info(page);
 
+	BUG_ON(!PageSwapCache(page));
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(PageUptodate(page), page);
 	if (frontswap_load(page) == 0) {
@@ -386,6 +388,7 @@ int swap_set_page_dirty(struct page *page)
 
 	if (sis->flags & SWP_FILE) {
 		struct address_space *mapping = sis->swap_file->f_mapping;
+		BUG_ON(!PageSwapCache(page));
 		return mapping->a_ops->set_page_dirty(page);
 	} else {
 		return __set_page_dirty_no_writeback(page);
@@ -2724,7 +2724,6 @@ int swapcache_prepare(swp_entry_t entry)
 struct swap_info_struct *page_swap_info(struct page *page)
 {
 	swp_entry_t swap = { .val = page_private(page) };
-	BUG_ON(!PageSwapCache(page));
 	return swap_info[swp_type(swap)];
 }
 
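The swap hunks above drop the BUG_ON(!PageSwapCache(page)) from page_swap_info() and add it to __swap_writepage(), swap_readpage() and swap_set_page_dirty() instead. A compact userspace analogue of relocating a sanity check from a low-level lookup helper into the callers that still insist on it is sketched below; all names and types are hypothetical.

#include <assert.h>
#include <stdio.h>

struct fake_page { int in_swapcache; int swap_type; };
static const char *swap_names[] = { "swapA", "swapB" };

/* Low-level lookup: after the change it no longer asserts, leaving the
 * policy decision to its callers. */
static const char *page_swap_name(const struct fake_page *page)
{
	return swap_names[page->swap_type];
}

/* I/O entry point: this caller really does require a swap-cache page,
 * so the assertion now lives here. */
static void swap_writepage_stub(const struct fake_page *page)
{
	assert(page->in_swapcache);
	printf("writing page to %s\n", page_swap_name(page));
}

int main(void)
{
	struct fake_page page = { .in_swapcache = 1, .swap_type = 0 };

	swap_writepage_stub(&page);
	return 0;
}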
@@ -134,30 +134,15 @@ static inline const char *check_bogus_address(const void *ptr, unsigned long n)
 	return NULL;
 }
 
-static inline const char *check_heap_object(const void *ptr, unsigned long n,
-					    bool to_user)
+/* Checks for allocs that are marked in some way as spanning multiple pages. */
+static inline const char *check_page_span(const void *ptr, unsigned long n,
+					  struct page *page, bool to_user)
 {
-	struct page *page, *endpage;
+#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
 	const void *end = ptr + n - 1;
+	struct page *endpage;
 	bool is_reserved, is_cma;
 
-	/*
-	 * Some architectures (arm64) return true for virt_addr_valid() on
-	 * vmalloced addresses. Work around this by checking for vmalloc
-	 * first.
-	 */
-	if (is_vmalloc_addr(ptr))
-		return NULL;
-
-	if (!virt_addr_valid(ptr))
-		return NULL;
-
-	page = virt_to_head_page(ptr);
-
-	/* Check slab allocator for flags and size. */
-	if (PageSlab(page))
-		return __check_heap_object(ptr, n, page);
-
 	/*
 	 * Sometimes the kernel data regions are not marked Reserved (see
 	 * check below). And sometimes [_sdata,_edata) does not cover
@@ -186,7 +171,7 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
 	    ((unsigned long)end & (unsigned long)PAGE_MASK)))
 		return NULL;
 
-	/* Allow if start and end are inside the same compound page. */
+	/* Allow if fully inside the same compound (__GFP_COMP) page. */
 	endpage = virt_to_head_page(end);
 	if (likely(endpage == page))
 		return NULL;
@@ -199,20 +184,47 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
 	is_reserved = PageReserved(page);
 	is_cma = is_migrate_cma_page(page);
 	if (!is_reserved && !is_cma)
-		goto reject;
+		return "<spans multiple pages>";
 
 	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
 		page = virt_to_head_page(ptr);
 		if (is_reserved && !PageReserved(page))
-			goto reject;
+			return "<spans Reserved and non-Reserved pages>";
 		if (is_cma && !is_migrate_cma_page(page))
-			goto reject;
+			return "<spans CMA and non-CMA pages>";
 	}
+#endif
 
 	return NULL;
+}
 
-reject:
-	return "<spans multiple pages>";
+static inline const char *check_heap_object(const void *ptr, unsigned long n,
+					    bool to_user)
+{
+	struct page *page;
+
+	/*
+	 * Some architectures (arm64) return true for virt_addr_valid() on
+	 * vmalloced addresses. Work around this by checking for vmalloc
+	 * first.
+	 *
+	 * We also need to check for module addresses explicitly since we
+	 * may copy static data from modules to userspace
+	 */
+	if (is_vmalloc_or_module_addr(ptr))
+		return NULL;
+
+	if (!virt_addr_valid(ptr))
+		return NULL;
+
+	page = virt_to_head_page(ptr);
+
+	/* Check slab allocator for flags and size. */
+	if (PageSlab(page))
+		return __check_heap_object(ptr, n, page);
+
+	/* Verify object does not incorrectly span multiple pages. */
+	return check_page_span(ptr, n, page, to_user);
 }
 
 /*
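After the usercopy hunks above, check_heap_object() keeps the address-validity and slab checks and delegates the multi-page test to the new check_page_span(), whose body only exists under CONFIG_HARDENED_USERCOPY_PAGESPAN. A small self-contained sketch of that compile-time-gated secondary check is below; apart from the option name taken from the diff, the types and the 4 KiB page arithmetic are made up for illustration.

#include <stdio.h>

/* Define this to mimic CONFIG_HARDENED_USERCOPY_PAGESPAN=y. */
/* #define CONFIG_HARDENED_USERCOPY_PAGESPAN 1 */

/* Stricter secondary check: the whole body disappears when the option
 * is off, and the function then objects to nothing. */
static const char *check_page_span(const void *ptr, unsigned long n)
{
#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
	/* Pretend 4 KiB pages: reject buffers crossing a page boundary. */
	unsigned long start = (unsigned long)ptr & ~0xfffUL;
	unsigned long end   = ((unsigned long)ptr + n - 1) & ~0xfffUL;

	if (start != end)
		return "<spans multiple pages>";
#endif
	(void)ptr;
	(void)n;
	return NULL;	/* NULL means "no objection" */
}

static const char *check_heap_object(const void *ptr, unsigned long n)
{
	/* Cheap checks would come first; then fall through to the gated one. */
	return check_page_span(ptr, n);
}

int main(void)
{
	char buf[32];
	const char *err = check_heap_object(buf, sizeof(buf));

	printf("check: %s\n", err ? err : "ok");
	return 0;
}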