Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton:
 "Incoming:

   - a small number of updates to scripts/, ocfs2 and fs/buffer.c

   - most of MM

  I still have quite a lot of material (mostly not MM) staged after
  linux-next due to -next dependencies. I'll send those across next week
  as the prerequisites get merged up"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (135 commits)
  mm/page_io.c: annotate refault stalls from swap_readpage
  mm/Kconfig: fix trivial help text punctuation
  mm/Kconfig: fix indentation
  mm/memory_hotplug.c: remove __online_page_set_limits()
  mm: fix typos in comments when calling __SetPageUptodate()
  mm: fix struct member name in function comments
  mm/shmem.c: cast the type of unmap_start to u64
  mm: shmem: use proper gfp flags for shmem_writepage()
  mm/shmem.c: make array 'values' static const, makes object smaller
  userfaultfd: require CAP_SYS_PTRACE for UFFD_FEATURE_EVENT_FORK
  fs/userfaultfd.c: wp: clear VM_UFFD_MISSING or VM_UFFD_WP during userfaultfd_register()
  userfaultfd: wrap the common dst_vma check into an inlined function
  userfaultfd: remove unnecessary WARN_ON() in __mcopy_atomic_hugetlb()
  userfaultfd: use vma_pagesize for all huge page size calculation
  mm/madvise.c: use PAGE_ALIGN[ED] for range checking
  mm/madvise.c: replace with page_size() in madvise_inject_error()
  mm/mmap.c: make vma_merge() comment more easy to understand
  mm/hwpoison-inject: use DEFINE_DEBUGFS_ATTRIBUTE to define debugfs fops
  autonuma: reduce cache footprint when scanning page tables
  autonuma: fix watermark checking in migrate_balanced_pgdat()
  ...

@@ -3156,7 +3156,6 @@ enum {
};
void dio_end_io(struct bio *bio);
void dio_warn_stale_pagecache(struct file *filp);
ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, struct iov_iter *iter,

@@ -3201,6 +3200,11 @@ static inline void inode_dio_end(struct inode *inode)
wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
}
/*
* Warn about a page cache invalidation failure during a direct I/O write.
*/
void dio_warn_stale_pagecache(struct file *filp);
extern void inode_set_flags(struct inode *inode, unsigned int flags,
unsigned int mask);

@@ -612,6 +612,8 @@ static inline bool pm_suspended_storage(void)
/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range(unsigned long start, unsigned long end,
unsigned migratetype, gfp_t gfp_mask);
extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
int nid, nodemask_t *nodemask);
#endif
void free_contig_range(unsigned long pfn, unsigned int nr_pages);
@@ -105,8 +105,7 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
pgoff_t idx, unsigned long address);
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

@@ -164,38 +163,130 @@ static inline void adjust_range_if_pmd_sharing_possible(
{
}
#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
static inline long follow_hugetlb_page(struct mm_struct *mm,
struct vm_area_struct *vma, struct page **pages,
struct vm_area_struct **vmas, unsigned long *position,
unsigned long *nr_pages, long i, unsigned int flags,
int *nonblocking)
{
BUG();
return 0;
}
static inline struct page *follow_huge_addr(struct mm_struct *mm,
unsigned long address, int write)
{
return ERR_PTR(-EINVAL);
}
static inline int copy_hugetlb_page_range(struct mm_struct *dst,
struct mm_struct *src, struct vm_area_struct *vma)
{
BUG();
return 0;
}
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf) 0
static inline int hugetlb_report_node_meminfo(int nid, char *buf)
{
return 0;
}
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pd(vma, addr, hpd, flags, pdshift) NULL
#define follow_huge_pmd(mm, addr, pmd, flags) NULL
#define follow_huge_pud(mm, addr, pud, flags) NULL
#define follow_huge_pgd(mm, addr, pgd, flags) NULL
#define prepare_hugepage_range(file, addr, len) (-EINVAL)
#define pmd_huge(x) 0
#define pud_huge(x) 0
#define is_hugepage_only_range(mm, addr, len) 0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
src_addr, pagep) ({ BUG(); 0; })
#define huge_pte_offset(mm, address, sz) 0
static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
unsigned long address, hugepd_t hpd, int flags,
int pdshift)
{
return NULL;
}
static inline struct page *follow_huge_pmd(struct mm_struct *mm,
unsigned long address, pmd_t *pmd, int flags)
{
return NULL;
}
static inline struct page *follow_huge_pud(struct mm_struct *mm,
unsigned long address, pud_t *pud, int flags)
{
return NULL;
}
static inline struct page *follow_huge_pgd(struct mm_struct *mm,
unsigned long address, pgd_t *pgd, int flags)
{
return NULL;
}
static inline int prepare_hugepage_range(struct file *file,
unsigned long addr, unsigned long len)
{
return -EINVAL;
}
static inline int pmd_huge(pmd_t pmd)
{
return 0;
}
static inline int pud_huge(pud_t pud)
{
return 0;
}
static inline int is_hugepage_only_range(struct mm_struct *mm,
unsigned long addr, unsigned long len)
{
return 0;
}
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
{
BUG();
}
static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
pte_t *dst_pte,
struct vm_area_struct *dst_vma,
unsigned long dst_addr,
unsigned long src_addr,
struct page **pagep)
{
BUG();
return 0;
}
static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
unsigned long sz)
{
return NULL;
}
static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
return false;
}
#define putback_active_hugepage(p) do {} while (0)
#define move_hugetlb_state(old, new, reason) do {} while (0)
static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot)
static inline void putback_active_hugepage(struct page *page)
{
}
static inline void move_hugetlb_state(struct page *oldpage,
struct page *newpage, int reason)
{
}
static inline unsigned long hugetlb_change_protection(
struct vm_area_struct *vma, unsigned long address,
unsigned long end, pgprot_t newprot)
{
return 0;
}

@@ -213,9 +304,10 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
{
BUG();
}
static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long address,
unsigned int flags)
struct vm_area_struct *vma, unsigned long address,
unsigned int flags)
{
BUG();
return 0;
@@ -70,8 +70,18 @@ struct kasan_cache {
int free_meta_offset;
};
/*
* These functions provide a special case to support backing module
* allocations with real shadow memory. With KASAN vmalloc, the special
* case is unnecessary, as the work is handled in the generic case.
*/
#ifndef CONFIG_KASAN_VMALLOC
int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);
#else
static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}
#endif
int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

@@ -194,4 +204,25 @@ static inline void *kasan_reset_tag(const void *addr)
#endif /* CONFIG_KASAN_SW_TAGS */
#ifdef CONFIG_KASAN_VMALLOC
int kasan_populate_vmalloc(unsigned long requested_size,
struct vm_struct *area);
void kasan_poison_vmalloc(void *start, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
unsigned long free_region_start,
unsigned long free_region_end);
#else
static inline int kasan_populate_vmalloc(unsigned long requested_size,
struct vm_struct *area)
{
return 0;
}
static inline void kasan_poison_vmalloc(void *start, unsigned long size) {}
static inline void kasan_release_vmalloc(unsigned long start,
unsigned long end,
unsigned long free_region_start,
unsigned long free_region_end) {}
#endif
#endif /* LINUX_KASAN_H */
@@ -358,6 +358,9 @@ static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
MEMBLOCK_ALLOC_ACCESSIBLE);
}
void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr,
int nid);
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr,
int nid);
@@ -58,7 +58,6 @@ enum mem_cgroup_protection {
struct mem_cgroup_reclaim_cookie {
pg_data_t *pgdat;
int priority;
unsigned int generation;
};

@@ -81,7 +80,6 @@ struct mem_cgroup_id {
enum mem_cgroup_events_target {
MEM_CGROUP_TARGET_THRESH,
MEM_CGROUP_TARGET_SOFTLIMIT,
MEM_CGROUP_TARGET_NUMAINFO,
MEM_CGROUP_NTARGETS,
};

@@ -112,7 +110,7 @@ struct memcg_shrinker_map {
};
/*
* per-zone information in memory controller.
* per-node information in memory controller.
*/
struct mem_cgroup_per_node {
struct lruvec lruvec;

@@ -126,7 +124,7 @@ struct mem_cgroup_per_node {
unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];
struct mem_cgroup_reclaim_iter iter;
struct memcg_shrinker_map __rcu *shrinker_map;

@@ -134,9 +132,6 @@ struct mem_cgroup_per_node {
unsigned long usage_in_excess;/* Set to the value by which */
/* the soft limit is exceeded*/
bool on_tree;
bool congested; /* memcg has many dirty pages */
/* backed by a congested BDI */
struct mem_cgroup *memcg; /* Back pointer, we cannot */
/* use container_of */
};

@@ -313,13 +308,6 @@ struct mem_cgroup {
struct list_head kmem_caches;
#endif
int last_scanned_node;
#if MAX_NUMNODES > 1
nodemask_t scan_nodes;
atomic_t numainfo_events;
atomic_t numainfo_updating;
#endif
#ifdef CONFIG_CGROUP_WRITEBACK
struct list_head cgwb_list;
struct wb_domain cgwb_domain;

@@ -394,25 +382,27 @@ mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
}
/**
* mem_cgroup_lruvec - get the lru list vector for a node or a memcg zone
* @node: node of the wanted lruvec
* mem_cgroup_lruvec - get the lru list vector for a memcg & node
* @memcg: memcg of the wanted lruvec
*
* Returns the lru list vector holding pages for a given @node or a given
* @memcg and @zone. This can be the node lruvec, if the memory controller
* is disabled.
* Returns the lru list vector holding pages for a given @memcg &
* @node combination. This can be the node lruvec, if the memory
* controller is disabled.
*/
static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
struct mem_cgroup *memcg)
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
struct pglist_data *pgdat)
{
struct mem_cgroup_per_node *mz;
struct lruvec *lruvec;
if (mem_cgroup_disabled()) {
lruvec = node_lruvec(pgdat);
lruvec = &pgdat->__lruvec;
goto out;
}
if (!memcg)
memcg = root_mem_cgroup;
mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
lruvec = &mz->lruvec;
out:

@@ -728,7 +718,7 @@ static inline void __mod_lruvec_page_state(struct page *page,
return;
}
lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
lruvec = mem_cgroup_lruvec(page->mem_cgroup, pgdat);
__mod_lruvec_state(lruvec, idx, val);
}

@@ -899,16 +889,21 @@ static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}
static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
struct mem_cgroup *memcg)
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
struct pglist_data *pgdat)
{
return node_lruvec(pgdat);
return &pgdat->__lruvec;
}
static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
struct pglist_data *pgdat)
{
return &pgdat->lruvec;
return &pgdat->__lruvec;
}
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
return NULL;
}
static inline bool mm_match_cgroup(struct mm_struct *mm,
@@ -102,13 +102,10 @@ extern unsigned long __offline_isolated_pages(unsigned long start_pfn,
typedef void (*online_page_callback_t)(struct page *page, unsigned int order);
extern void generic_online_page(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);
extern void __online_page_set_limits(struct page *page);
extern void __online_page_increment_counters(struct page *page);
extern void __online_page_free(struct page *page);
extern int try_online_node(int nid);
extern int arch_add_memory(int nid, u64 start, u64 size,

@@ -229,9 +226,6 @@ void put_online_mems(void);
void mem_hotplug_begin(void);
void mem_hotplug_done(void);
extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);
#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn) \
({ \

@@ -339,6 +333,9 @@ static inline int remove_memory(int nid, u64 start, u64 size)
static inline void __remove_memory(int nid, u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */
extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);
extern void __ref free_area_init_core_hotplug(int nid);
extern int __add_memory(int nid, u64 start, u64 size);
extern int add_memory(int nid, u64 start, u64 size);
@@ -564,21 +564,6 @@ int vma_is_stack_for_current(struct vm_area_struct *vma);
struct mmu_gather;
struct inode;
#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline int pmd_devmap(pmd_t pmd)
{
return 0;
}
static inline int pud_devmap(pud_t pud)
{
return 0;
}
static inline int pgd_devmap(pgd_t pgd)
{
return 0;
}
#endif
/*
* FIXME: take this include out, include page-flags.h in
* files which need it (119 of them)

@@ -1643,19 +1628,27 @@ static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
return (unsigned long)val;
}
void mm_trace_rss_stat(struct mm_struct *mm, int member, long count);
static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
atomic_long_add(value, &mm->rss_stat.count[member]);
long count = atomic_long_add_return(value, &mm->rss_stat.count[member]);
mm_trace_rss_stat(mm, member, count);
}
static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
atomic_long_inc(&mm->rss_stat.count[member]);
long count = atomic_long_inc_return(&mm->rss_stat.count[member]);
mm_trace_rss_stat(mm, member, count);
}
static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
atomic_long_dec(&mm->rss_stat.count[member]);
long count = atomic_long_dec_return(&mm->rss_stat.count[member]);
mm_trace_rss_stat(mm, member, count);
}
/* Optimized variant when page is already known not to be PageAnon */

@@ -2214,9 +2207,6 @@ void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
extern void setup_per_cpu_pageset(void);
extern void zone_pcp_update(struct zone *zone);
extern void zone_pcp_reset(struct zone *zone);
/* page_alloc.c */
extern int min_free_kbytes;
extern int watermark_boost_factor;

@@ -2780,7 +2770,7 @@ extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t num_poisoned_pages __read_mostly;
extern int soft_offline_page(struct page *page, int flags);
extern int soft_offline_page(unsigned long pfn, int flags);
/*
@@ -273,12 +273,12 @@ enum lru_list {
#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
static inline int is_file_lru(enum lru_list lru)
static inline bool is_file_lru(enum lru_list lru)
{
return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}
static inline int is_active_lru(enum lru_list lru)
static inline bool is_active_lru(enum lru_list lru)
{
return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}

@@ -296,6 +296,12 @@ struct zone_reclaim_stat {
unsigned long recent_scanned[2];
};
enum lruvec_flags {
LRUVEC_CONGESTED, /* lruvec has many dirty pages
* backed by a congested BDI
*/
};
struct lruvec {
struct list_head lists[NR_LRU_LISTS];
struct zone_reclaim_stat reclaim_stat;

@@ -303,12 +309,14 @@ struct lruvec {
atomic_long_t inactive_age;
/* Refaults at the time of last reclaim cycle */
unsigned long refaults;
/* Various lruvec state flags (enum lruvec_flags) */
unsigned long flags;
#ifdef CONFIG_MEMCG
struct pglist_data *pgdat;
#endif
};
/* Isolate unmapped file */
/* Isolate unmapped pages */
#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4)

@@ -572,9 +580,6 @@ struct zone {
} ____cacheline_internodealigned_in_smp;
enum pgdat_flags {
PGDAT_CONGESTED, /* pgdat has many dirty pages backed by
* a congested BDI
*/
PGDAT_DIRTY, /* reclaim scanning has recently found
* many dirty file pages at the tail
* of the LRU.

@@ -777,7 +782,13 @@ typedef struct pglist_data {
#endif
/* Fields commonly accessed by the page reclaim scanner */
struct lruvec lruvec;
/*
* NOTE: THIS IS UNUSED IF MEMCG IS ENABLED.
*
* Use mem_cgroup_lruvec() to look up lruvecs.
*/
struct lruvec __lruvec;
unsigned long flags;

@@ -800,11 +811,6 @@ typedef struct pglist_data {
#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))
static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
{
return &pgdat->lruvec;
}
static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
return pgdat->node_start_pfn + pgdat->node_spanned_pages;

@@ -842,7 +848,7 @@ static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
#ifdef CONFIG_MEMCG
return lruvec->pgdat;
#else
return container_of(lruvec, struct pglist_data, lruvec);
return container_of(lruvec, struct pglist_data, __lruvec);
#endif
}

@@ -1079,7 +1085,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
/**
* for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
* @zone - The current zone in the iterator
* @z - The current pointer within zonelist->zones being iterated
* @z - The current pointer within zonelist->_zonerefs being iterated
* @zlist - The zonelist being iterated
* @highidx - The zone index of the highest zone to return
* @nodemask - Nodemask allowed by the allocator
@@ -91,7 +91,7 @@ void module_arch_cleanup(struct module *mod);
/* Any cleanup before freeing mod->module_init */
void module_arch_freeing_init(struct module *mod);
#ifdef CONFIG_KASAN
#if defined(CONFIG_KASAN) && !defined(CONFIG_KASAN_VMALLOC)
#include <linux/kasan.h>
#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
@@ -30,7 +30,7 @@ static inline bool is_migrate_isolate(int migratetype)
}
#endif
#define SKIP_HWPOISON 0x1
#define MEMORY_OFFLINE 0x1
#define REPORT_FAILURE 0x2
bool has_unmovable_pages(struct zone *zone, struct page *page, int count,

@@ -58,7 +58,7 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
* Test all pages in [start_pfn, end_pfn) are isolated or not.
*/
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
bool skip_hwpoisoned_pages);
int isol_flags);
struct page *alloc_migrate_target(struct page *page, unsigned long private);
@@ -561,26 +561,6 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
return __kmalloc(size, flags);
}
/*
* Determine size used for the nth kmalloc cache.
* return size or 0 if a kmalloc cache for that
* size does not exist
*/
static __always_inline unsigned int kmalloc_size(unsigned int n)
{
#ifndef CONFIG_SLOB
if (n > 2)
return 1U << n;
if (n == 1 && KMALLOC_MIN_SIZE <= 32)
return 96;
if (n == 2 && KMALLOC_MIN_SIZE <= 64)
return 192;
#endif
return 0;
}
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
@@ -216,6 +216,8 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
const void *from, size_t available);
int ptr_to_hashval(const void *ptr, unsigned long *hashval_out);
/**
* strstarts - does @str start with @prefix?
* @str: string to examine
@@ -307,7 +307,7 @@ struct vma_swap_readahead {
};
/* linux/mm/workingset.c */
void *workingset_eviction(struct page *page);
void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg);
void workingset_refault(struct page *page, void *shadow);
void workingset_activation(struct page *page);
@@ -22,6 +22,18 @@ struct notifier_block; /* in notifier.h */
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
#define VM_NO_GUARD 0x00000040 /* don't add guard page */
#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
/*
* VM_KASAN is used slightly differently depending on CONFIG_KASAN_VMALLOC.
*
* If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after
* shadow memory has been mapped. It's used to handle allocation errors so that
* we don't try to poison shadow on free if it was never allocated.
*
* Otherwise, VM_KASAN is set for kasan_module_alloc() allocations and used to
* determine which allocations need the module shadow freed.
*/

/*
* Memory with VM_FLUSH_RESET_PERMS cannot be freed in an interrupt or with
* vfree_atomic().