Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:

 - large KASAN update to use arm's "software tag-based mode"
 - a few misc things
 - sh updates
 - ocfs2 updates
 - just about all of MM

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (167 commits)
  kernel/fork.c: mark 'stack_vm_area' with __maybe_unused
  memcg, oom: notify on oom killer invocation from the charge path
  mm, swap: fix swapoff with KSM pages
  include/linux/gfp.h: fix typo
  mm/hmm: fix memremap.h, move dev_page_fault_t callback to hmm
  hugetlbfs: Use i_mmap_rwsem to fix page fault/truncate race
  hugetlbfs: use i_mmap_rwsem for more pmd sharing synchronization
  memory_hotplug: add missing newlines to debugging output
  mm: remove __hugepage_set_anon_rmap()
  include/linux/vmstat.h: remove unused page state adjustment macro
  mm/page_alloc.c: allow error injection
  mm: migrate: drop unused argument of migrate_page_move_mapping()
  blkdev: avoid migration stalls for blkdev pages
  mm: migrate: provide buffer_migrate_page_norefs()
  mm: migrate: move migrate_page_lock_buffers()
  mm: migrate: lock buffers before migrate_page_move_mapping()
  mm: migration: factor out code to compute expected number of page references
  mm, page_alloc: enable pcpu_drain with zone capability
  kmemleak: add config to select auto scan
  mm/page_alloc.c: don't call kasan_free_pages() at deferred mem init
  ...
fs/aio.c
@@ -415,7 +415,7 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
 BUG_ON(PageWriteback(old));
 get_page(new);
 
-rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1);
+rc = migrate_page_move_mapping(mapping, new, old, mode, 1);
 if (rc != MIGRATEPAGE_SUCCESS) {
 put_page(new);
 goto out_unlock;
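For context, this hunk reflects "mm: migrate: drop unused argument of migrate_page_move_mapping()": the unused buffer_head parameter is gone, so callers now pass five arguments. A minimal sketch of a ->migratepage callback against the new signature (the examplefs name and the zero extra_count are illustrative, not taken from this commit):

/* Sketch only: trivial ->migratepage using the post-series
 * migrate_page_move_mapping() signature (no buffer_head argument).
 */
static int examplefs_migratepage(struct address_space *mapping,
                                 struct page *newpage, struct page *page,
                                 enum migrate_mode mode)
{
        int rc;

        /* extra_count == 0: no references beyond the expected ones */
        rc = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
        if (rc != MIGRATEPAGE_SUCCESS)
                return rc;

        /* copy page contents and state over to the new page */
        migrate_page_copy(newpage, page);
        return MIGRATEPAGE_SUCCESS;
}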
@@ -1992,6 +1992,7 @@ static const struct address_space_operations def_blk_aops = {
 .writepages = blkdev_writepages,
 .releasepage = blkdev_releasepage,
 .direct_IO = blkdev_direct_IO,
+.migratepage = buffer_migrate_page_norefs,
 .is_dirty_writeback = buffer_check_dirty_writeback,
 };
@@ -810,7 +810,7 @@ static inline int default_congestion_kb(void)
 * This allows larger machines to have larger/more transfers.
 * Limit the default to 256M
 */
-congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
+congestion_kb = (16*int_sqrt(totalram_pages())) << (PAGE_SHIFT-10);
 if (congestion_kb > 256*1024)
 congestion_kb = 256*1024;
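The totalram_pages conversions in this merge ("mm: convert totalram_pages and totalhigh_pages variables to atomic") turn the old globals into accessor functions, which is why every reader above gains parentheses. A sketch of the caller-side pattern, mirroring the congestion_kb calculation in the ceph and NFS hunks (the function name is illustrative):

#include <linux/kernel.h>
#include <linux/mm.h>

/* Sketch only: read memory totals through the accessors rather than
 * the old global variables; the formula mirrors the ceph/NFS hunks.
 */
static unsigned long example_congestion_kb(void)
{
        unsigned long kb;

        kb = (16 * int_sqrt(totalram_pages())) << (PAGE_SHIFT - 10);
        if (kb > 256 * 1024)
                kb = 256 * 1024;        /* cap the default at 256M */
        return kb;
}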
fs/dax.c
@@ -779,7 +779,8 @@ static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
 i_mmap_lock_read(mapping);
 vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
-unsigned long address, start, end;
+struct mmu_notifier_range range;
+unsigned long address;
 
 cond_resched();
 
@@ -793,7 +794,8 @@ static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
 * call mmu_notifier_invalidate_range_start() on our behalf
 * before taking any lock.
 */
-if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
+if (follow_pte_pmd(vma->vm_mm, address, &range,
+&ptep, &pmdp, &ptl))
 continue;
 
 /*
@@ -835,7 +837,7 @@ unlock_pte:
 pte_unmap_unlock(ptep, ptl);
 }
 
-mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
+mmu_notifier_invalidate_range_end(&range);
 }
 i_mmap_unlock_read(mapping);
 }
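These dax hunks come from the mmu_notifier_range conversion in this series: the caller fills a struct mmu_notifier_range once and hands it to the start/end calls instead of passing (mm, start, end) each time, and follow_pte_pmd() now reports the range it covers through the same struct. A sketch of the pattern, assuming the four-argument mmu_notifier_range_init() used throughout this merge (later kernels change the signature again; example_invalidate is an illustrative name):

#include <linux/mmu_notifier.h>

/* Sketch: struct-based invalidation as used in this merge window. */
static void example_invalidate(struct mm_struct *mm,
                               unsigned long start, unsigned long end)
{
        struct mmu_notifier_range range;

        mmu_notifier_range_init(&range, mm, start, end);
        mmu_notifier_invalidate_range_start(&range);
        /* ... clear or write-protect PTEs in [start, end) here ... */
        mmu_notifier_invalidate_range_end(&range);
}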
@@ -2738,7 +2738,7 @@ int f2fs_migrate_page(struct address_space *mapping,
 */
 extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
 rc = migrate_page_move_mapping(mapping, newpage,
-page, NULL, mode, extra_count);
+page, mode, extra_count);
 if (rc != MIGRATEPAGE_SUCCESS) {
 if (atomic_written)
 mutex_unlock(&fi->inmem_lock);
@@ -380,10 +380,11 @@ void __init files_init(void)
 void __init files_maxfiles_init(void)
 {
 unsigned long n;
-unsigned long memreserve = (totalram_pages - nr_free_pages()) * 3/2;
+unsigned long nr_pages = totalram_pages();
+unsigned long memreserve = (nr_pages - nr_free_pages()) * 3/2;
 
-memreserve = min(memreserve, totalram_pages - 1);
-n = ((totalram_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;
+memreserve = min(memreserve, nr_pages - 1);
+n = ((nr_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;
 
 files_stat.max_files = max_t(unsigned long, n, NR_FILE);
 }
@@ -824,7 +824,7 @@ static const struct super_operations fuse_super_operations = {
 static void sanitize_global_limit(unsigned *limit)
 {
 if (*limit == 0)
-*limit = ((totalram_pages << PAGE_SHIFT) >> 13) /
+*limit = ((totalram_pages() << PAGE_SHIFT) >> 13) /
 sizeof(struct fuse_req);
 
 if (*limit >= 1 << 16)
@@ -383,17 +383,16 @@ hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
 * truncation is indicated by end of range being LLONG_MAX
 * In this case, we first scan the range and release found pages.
 * After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
-* maps and global counts. Page faults can not race with truncation
-* in this routine. hugetlb_no_page() prevents page faults in the
-* truncated range. It checks i_size before allocation, and again after
-* with the page table lock for the page held. The same lock must be
-* acquired to unmap a page.
+* maps and global counts.
 * hole punch is indicated if end is not LLONG_MAX
 * In the hole punch case we scan the range and release found pages.
 * Only when releasing a page is the associated region/reserv map
 * deleted. The region/reserv map for ranges without associated
-* pages are not modified. Page faults can race with hole punch.
-* This is indicated if we find a mapped page.
+* pages are not modified.
+*
+* Callers of this routine must hold the i_mmap_rwsem in write mode to prevent
+* races with page faults.
+*
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
@@ -423,32 +422,14 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 for (i = 0; i < pagevec_count(&pvec); ++i) {
 struct page *page = pvec.pages[i];
-u32 hash;
 
 index = page->index;
-hash = hugetlb_fault_mutex_hash(h, current->mm,
-&pseudo_vma,
-mapping, index, 0);
-mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
 /*
-* If page is mapped, it was faulted in after being
-* unmapped in caller. Unmap (again) now after taking
-* the fault mutex. The mutex will prevent faults
-* until we finish removing the page.
-*
-* This race can only happen in the hole punch case.
-* Getting here in a truncate operation is a bug.
+* A mapped page is impossible as callers should unmap
+* all references before calling. And, i_mmap_rwsem
+* prevents the creation of additional mappings.
 */
-if (unlikely(page_mapped(page))) {
-BUG_ON(truncate_op);
-
-i_mmap_lock_write(mapping);
-hugetlb_vmdelete_list(&mapping->i_mmap,
-index * pages_per_huge_page(h),
-(index + 1) * pages_per_huge_page(h));
-i_mmap_unlock_write(mapping);
-}
+VM_BUG_ON(page_mapped(page));
 
 lock_page(page);
 /*
@@ -470,7 +451,6 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 }
 
 unlock_page(page);
-mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 }
 huge_pagevec_release(&pvec);
 cond_resched();
@@ -482,9 +462,20 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 
 static void hugetlbfs_evict_inode(struct inode *inode)
 {
+struct address_space *mapping = inode->i_mapping;
 struct resv_map *resv_map;
 
+/*
+* The vfs layer guarantees that there are no other users of this
+* inode. Therefore, it would be safe to call remove_inode_hugepages
+* without holding i_mmap_rwsem. We acquire and hold here to be
+* consistent with other callers. Since there will be no contention
+* on the semaphore, overhead is negligible.
+*/
+i_mmap_lock_write(mapping);
 remove_inode_hugepages(inode, 0, LLONG_MAX);
+i_mmap_unlock_write(mapping);
+
 resv_map = (struct resv_map *)inode->i_mapping->private_data;
 /* root inode doesn't have the resv_map, so we should check it */
 if (resv_map)
@@ -505,8 +496,8 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
 i_mmap_lock_write(mapping);
 if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
 hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
-i_mmap_unlock_write(mapping);
 remove_inode_hugepages(inode, offset, LLONG_MAX);
+i_mmap_unlock_write(mapping);
 return 0;
 }
@@ -540,8 +531,8 @@ static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 hugetlb_vmdelete_list(&mapping->i_mmap,
 hole_start >> PAGE_SHIFT,
 hole_end >> PAGE_SHIFT);
-i_mmap_unlock_write(mapping);
 remove_inode_hugepages(inode, hole_start, hole_end);
+i_mmap_unlock_write(mapping);
 inode_unlock(inode);
 }
@@ -624,7 +615,11 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
 /* addr is the offset within the file (zero based) */
 addr = index * hpage_size;
 
-/* mutex taken here, fault path and hole punch */
+/*
+* fault mutex taken here, protects against fault path
+* and hole punch. inode_lock previously taken protects
+* against truncation.
+*/
 hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping,
 index, addr);
 mutex_lock(&hugetlb_fault_mutex_table[hash]);
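Taken together, the hugetlbfs hunks rework the locking so that i_mmap_rwsem, held for write, guards truncate and hole punch against page faults, and remove_inode_hugepages() is now called with the semaphore held. A rough sketch of the resulting truncate-side ordering, paraphrasing the hunks above rather than copying kernel code (the example_* name is illustrative):

/* Sketch of the post-series ordering for a hugetlbfs truncate:
 * inode_lock -> i_mmap_rwsem (write) -> page work inside
 * remove_inode_hugepages().  Not a literal copy of the kernel code.
 */
static void example_hugetlb_truncate(struct inode *inode, loff_t offset)
{
        struct address_space *mapping = inode->i_mapping;
        pgoff_t pgoff = offset >> PAGE_SHIFT;

        i_mmap_lock_write(mapping);             /* blocks new page faults */
        if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
                hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
        remove_inode_hugepages(inode, offset, LLONG_MAX);
        i_mmap_unlock_write(mapping);           /* faults may proceed again */
}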
@@ -563,7 +563,7 @@ iomap_migrate_page(struct address_space *mapping, struct page *newpage,
 {
 int ret;
 
-ret = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
+ret = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
 if (ret != MIGRATEPAGE_SUCCESS)
 return ret;
@@ -2121,7 +2121,7 @@ int __init nfs_init_writepagecache(void)
 * This allows larger machines to have larger/more transfers.
 * Limit the default to 256M
 */
-nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
+nfs_congestion_kb = (16*int_sqrt(totalram_pages())) << (PAGE_SHIFT-10);
 if (nfs_congestion_kb > 256*1024)
 nfs_congestion_kb = 256*1024;
@@ -99,7 +99,7 @@ static unsigned int
 nfsd_cache_size_limit(void)
 {
 unsigned int limit;
-unsigned long low_pages = totalram_pages - totalhigh_pages;
+unsigned long low_pages = totalram_pages() - totalhigh_pages();
 
 limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
 return min_t(unsigned int, limit, 256*1024);
@@ -47,7 +47,7 @@ static inline void *__ntfs_malloc(unsigned long size, gfp_t gfp_mask)
 return kmalloc(PAGE_SIZE, gfp_mask & ~__GFP_HIGHMEM);
 /* return (void *)__get_free_page(gfp_mask); */
 }
-if (likely((size >> PAGE_SHIFT) < totalram_pages))
+if (likely((size >> PAGE_SHIFT) < totalram_pages()))
 return __vmalloc(size, gfp_mask, PAGE_KERNEL);
 return NULL;
 }
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
-ccflags-y := -Ifs/ocfs2
+ccflags-y := -I$(src)
 
 obj-$(CONFIG_OCFS2_FS) += \
 ocfs2.o \
@@ -161,7 +161,6 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
 #endif
 }
 
-clear_buffer_uptodate(bh);
 get_bh(bh); /* for end_buffer_read_sync() */
 bh->b_end_io = end_buffer_read_sync;
 submit_bh(REQ_OP_READ, 0, bh);
@@ -341,7 +340,6 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
 continue;
 }
 
-clear_buffer_uptodate(bh);
 get_bh(bh); /* for end_buffer_read_sync() */
 if (validate)
 set_buffer_needs_validate(bh);
@@ -582,9 +582,10 @@ bail:
 }
 
 static int o2hb_read_slots(struct o2hb_region *reg,
+unsigned int begin_slot,
 unsigned int max_slots)
 {
-unsigned int current_slot=0;
+unsigned int current_slot = begin_slot;
 int status;
 struct o2hb_bio_wait_ctxt wc;
 struct bio *bio;
@@ -1093,9 +1094,14 @@ static int o2hb_highest_node(unsigned long *nodes, int numbits)
 return find_last_bit(nodes, numbits);
 }
 
+static int o2hb_lowest_node(unsigned long *nodes, int numbits)
+{
+return find_first_bit(nodes, numbits);
+}
+
 static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
 {
-int i, ret, highest_node;
+int i, ret, highest_node, lowest_node;
 int membership_change = 0, own_slot_ok = 0;
 unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)];
 unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
@@ -1120,7 +1126,8 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
 }
 
 highest_node = o2hb_highest_node(configured_nodes, O2NM_MAX_NODES);
-if (highest_node >= O2NM_MAX_NODES) {
+lowest_node = o2hb_lowest_node(configured_nodes, O2NM_MAX_NODES);
+if (highest_node >= O2NM_MAX_NODES || lowest_node >= O2NM_MAX_NODES) {
 mlog(ML_NOTICE, "o2hb: No configured nodes found!\n");
 ret = -EINVAL;
 goto bail;
@@ -1130,7 +1137,7 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
 * yet. Of course, if the node definitions have holes in them
 * then we're reading an empty slot anyway... Consider this
 * best-effort. */
-ret = o2hb_read_slots(reg, highest_node + 1);
+ret = o2hb_read_slots(reg, lowest_node, highest_node + 1);
 if (ret < 0) {
 mlog_errno(ret);
 goto bail;
@@ -1801,7 +1808,7 @@ static int o2hb_populate_slot_data(struct o2hb_region *reg)
 struct o2hb_disk_slot *slot;
 struct o2hb_disk_heartbeat_block *hb_block;
 
-ret = o2hb_read_slots(reg, reg->hr_blocks);
+ret = o2hb_read_slots(reg, 0, reg->hr_blocks);
 if (ret)
 goto out;
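The heartbeat change narrows slot I/O to the window of configured nodes: o2hb_read_slots() gains a begin_slot argument, and the start of the window comes from the new o2hb_lowest_node(), a thin wrapper around find_first_bit(). A hedged sketch of deriving such a window from a node bitmap (the example_* names are illustrative, not from this commit):

#include <linux/bitops.h>
#include <linux/errno.h>

/* Sketch: compute the [lowest, highest] node window from a bitmap,
 * mirroring o2hb_lowest_node()/o2hb_highest_node() above.  Both
 * find_first_bit() and find_last_bit() return numbits when the
 * bitmap is empty, so that case maps to -EINVAL here.
 */
static int example_slot_window(const unsigned long *nodes, int numbits,
                               int *lowest, int *highest)
{
        *lowest = find_first_bit(nodes, numbits);
        *highest = find_last_bit(nodes, numbits);

        if (*lowest >= numbits || *highest >= numbits)
                return -EINVAL;         /* no configured nodes */
        return 0;
}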
@@ -1,4 +1,4 @@
-ccflags-y := -Ifs/ocfs2
+ccflags-y := -I$(src)/..
 
 obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_dlm.o
@@ -1,4 +1,4 @@
-ccflags-y := -Ifs/ocfs2
+ccflags-y := -I$(src)/..
 
 obj-$(CONFIG_OCFS2_FS) += ocfs2_dlmfs.o
@@ -179,7 +179,7 @@ bail:
 static int dlmfs_file_release(struct inode *inode,
 struct file *file)
 {
-int level, status;
+int level;
 struct dlmfs_inode_private *ip = DLMFS_I(inode);
 struct dlmfs_filp_private *fp = file->private_data;
@@ -188,7 +188,6 @@ static int dlmfs_file_release(struct inode *inode,
 
 mlog(0, "close called on inode %lu\n", inode->i_ino);
 
-status = 0;
 if (fp) {
 level = fp->fp_lock_level;
 if (level != DLM_LOCK_IV)
@@ -1017,7 +1017,8 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
 mlog_errno(status);
 }
 
-if (status == 0) {
+/* Shutdown the kernel journal system */
+if (!jbd2_journal_destroy(journal->j_journal) && !status) {
 /*
 * Do not toggle if flush was unsuccessful otherwise
 * will leave dirty metadata in a "clean" journal
@@ -1026,9 +1027,6 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
 if (status < 0)
 mlog_errno(status);
 }
 
-/* Shutdown the kernel journal system */
-jbd2_journal_destroy(journal->j_journal);
 journal->j_journal = NULL;
 
 OCFS2_I(inode)->ip_open_count--;
@@ -345,13 +345,18 @@ int ocfs2_load_local_alloc(struct ocfs2_super *osb)
 if (num_used
 || alloc->id1.bitmap1.i_used
 || alloc->id1.bitmap1.i_total
-|| la->la_bm_off)
-mlog(ML_ERROR, "Local alloc hasn't been recovered!\n"
+|| la->la_bm_off) {
+mlog(ML_ERROR, "inconsistent detected, clean journal with"
+" unrecovered local alloc, please run fsck.ocfs2!\n"
 "found = %u, set = %u, taken = %u, off = %u\n",
 num_used, le32_to_cpu(alloc->id1.bitmap1.i_used),
 le32_to_cpu(alloc->id1.bitmap1.i_total),
 OCFS2_LOCAL_ALLOC(alloc)->la_bm_off);
 
+status = -EINVAL;
+goto bail;
+}
+
 osb->local_alloc_bh = alloc_bh;
 osb->local_alloc_state = OCFS2_LA_ENABLED;
@@ -835,7 +840,7 @@ static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
 u32 *numbits,
 struct ocfs2_alloc_reservation *resv)
 {
-int numfound = 0, bitoff, left, startoff, lastzero;
+int numfound = 0, bitoff, left, startoff;
 int local_resv = 0;
 struct ocfs2_alloc_reservation r;
 void *bitmap = NULL;
@@ -873,7 +878,6 @@ static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
 bitmap = OCFS2_LOCAL_ALLOC(alloc)->la_bitmap;
 
 numfound = bitoff = startoff = 0;
-lastzero = -1;
 left = le32_to_cpu(alloc->id1.bitmap1.i_total);
 while ((bitoff = ocfs2_find_next_zero_bit(bitmap, left, startoff)) != -1) {
 if (bitoff == left) {
@@ -392,6 +392,15 @@ static inline void task_core_dumping(struct seq_file *m, struct mm_struct *mm)
 seq_putc(m, '\n');
 }
 
+static inline void task_thp_status(struct seq_file *m, struct mm_struct *mm)
+{
+bool thp_enabled = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE);
+
+if (thp_enabled)
+thp_enabled = !test_bit(MMF_DISABLE_THP, &mm->flags);
+seq_printf(m, "THP_enabled:\t%d\n", thp_enabled);
+}
+
 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
 struct pid *pid, struct task_struct *task)
 {
@@ -406,6 +415,7 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
 if (mm) {
 task_mem(m, mm);
 task_core_dumping(m, mm);
+task_thp_status(m, mm);
 mmput(mm);
 }
 task_sig(m, task);
@@ -530,7 +530,7 @@ static const struct file_operations proc_lstats_operations = {
 static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns,
 struct pid *pid, struct task_struct *task)
 {
-unsigned long totalpages = totalram_pages + total_swap_pages;
+unsigned long totalpages = totalram_pages() + total_swap_pages;
 unsigned long points = 0;
 
 points = oom_badness(task, NULL, NULL, totalpages) *
@@ -46,7 +46,7 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
 ppage = pfn_to_page(pfn);
 else
 ppage = NULL;
-if (!ppage || PageSlab(ppage))
+if (!ppage || PageSlab(ppage) || page_has_type(ppage))
 pcount = 0;
 else
 pcount = page_mapcount(ppage);
@@ -790,6 +790,8 @@ static int show_smap(struct seq_file *m, void *v)
 
 __show_smap(m, &mss);
 
+seq_printf(m, "THPeligible: %d\n", transparent_hugepage_enabled(vma));
+
 if (arch_pkeys_enabled())
 seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
 show_smap_vma_flags(m, vma);
@@ -1096,6 +1098,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 return -ESRCH;
 mm = get_task_mm(task);
 if (mm) {
+struct mmu_notifier_range range;
 struct clear_refs_private cp = {
 .type = type,
 };
@@ -1139,11 +1142,13 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 downgrade_write(&mm->mmap_sem);
 break;
 }
-mmu_notifier_invalidate_range_start(mm, 0, -1);
+
+mmu_notifier_range_init(&range, mm, 0, -1UL);
+mmu_notifier_invalidate_range_start(&range);
 }
 walk_page_range(0, mm->highest_vm_end, &clear_refs_walk);
 if (type == CLEAR_REFS_SOFT_DIRTY)
-mmu_notifier_invalidate_range_end(mm, 0, -1);
+mmu_notifier_invalidate_range_end(&range);
 tlb_finish_mmu(&tlb, 0, -1);
 up_read(&mm->mmap_sem);
 out_mm:
@@ -1481,7 +1481,7 @@ static int ubifs_migrate_page(struct address_space *mapping,
 {
 int rc;
 
-rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
+rc = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
 if (rc != MIGRATEPAGE_SUCCESS)
 return rc;
@@ -53,7 +53,7 @@ struct userfaultfd_ctx {
 /* a refile sequence protected by fault_pending_wqh lock */
 struct seqcount refile_seq;
 /* pseudo fd refcounting */
-atomic_t refcount;
+refcount_t refcount;
 /* userfaultfd syscall flags */
 unsigned int flags;
 /* features requested from the userspace */
@@ -140,8 +140,7 @@ out:
 */
 static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
 {
-if (!atomic_inc_not_zero(&ctx->refcount))
-BUG();
+refcount_inc(&ctx->refcount);
 }
 
 /**
@@ -154,7 +153,7 @@ static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
 */
 static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
 {
-if (atomic_dec_and_test(&ctx->refcount)) {
+if (refcount_dec_and_test(&ctx->refcount)) {
 VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
 VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
 VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
@@ -686,7 +685,7 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
 return -ENOMEM;
 }
 
-atomic_set(&ctx->refcount, 1);
+refcount_set(&ctx->refcount, 1);
 ctx->flags = octx->flags;
 ctx->state = UFFD_STATE_RUNNING;
 ctx->features = octx->features;
@@ -736,10 +735,18 @@ void mremap_userfaultfd_prep(struct vm_area_struct *vma,
 struct userfaultfd_ctx *ctx;
 
 ctx = vma->vm_userfaultfd_ctx.ctx;
-if (ctx && (ctx->features & UFFD_FEATURE_EVENT_REMAP)) {
+
+if (!ctx)
+return;
+
+if (ctx->features & UFFD_FEATURE_EVENT_REMAP) {
 vm_ctx->ctx = ctx;
 userfaultfd_ctx_get(ctx);
 WRITE_ONCE(ctx->mmap_changing, true);
+} else {
+/* Drop uffd context if remap feature not enabled */
+vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
 }
 }
@@ -1927,7 +1934,7 @@ SYSCALL_DEFINE1(userfaultfd, int, flags)
 if (!ctx)
 return -ENOMEM;
 
-atomic_set(&ctx->refcount, 1);
+refcount_set(&ctx->refcount, 1);
 ctx->flags = flags;
 ctx->features = 0;
 ctx->state = UFFD_STATE_WAIT_API;
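The userfaultfd hunks are a straightforward atomic_t to refcount_t conversion: refcount_set() replaces atomic_set() at initialization, refcount_inc() replaces the hand-rolled atomic_inc_not_zero()/BUG() in the get path, and refcount_dec_and_test() drives the put path, with overflow and use-after-free checks coming from the refcount API itself. A sketch of the same pattern on a made-up object (example_obj and the helpers are illustrative, not from this commit):

#include <linux/refcount.h>
#include <linux/slab.h>

/* Sketch of the atomic_t -> refcount_t conversion pattern used for
 * userfaultfd_ctx above, applied to a toy object.
 */
struct example_obj {
        refcount_t refcount;
};

static struct example_obj *example_alloc(void)
{
        struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (obj)
                refcount_set(&obj->refcount, 1);        /* initial reference */
        return obj;
}

static void example_get(struct example_obj *obj)
{
        refcount_inc(&obj->refcount);   /* warns on overflow or zero refcount */
}

static void example_put(struct example_obj *obj)
{
        if (refcount_dec_and_test(&obj->refcount))
                kfree(obj);
}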