Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton:
 - a few misc things
 - a few Y2038 fixes
 - ntfs fixes
 - arch/sh tweaks
 - ocfs2 updates
 - most of MM

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (111 commits)
  mm/hmm.c: remove unused variables align_start and align_end
  fs/userfaultfd.c: remove redundant pointer uwq
  mm, vmacache: hash addresses based on pmd
  mm/list_lru: introduce list_lru_shrink_walk_irq()
  mm/list_lru.c: pass struct list_lru_node* as an argument to __list_lru_walk_one()
  mm/list_lru.c: move locking from __list_lru_walk_one() to its caller
  mm/list_lru.c: use list_lru_walk_one() in list_lru_walk_node()
  mm, swap: make CONFIG_THP_SWAP depend on CONFIG_SWAP
  mm/sparse: delete old sparse_init and enable new one
  mm/sparse: add new sparse_init_nid() and sparse_init()
  mm/sparse: move buffer init/fini to the common place
  mm/sparse: use the new sparse buffer functions in non-vmemmap
  mm/sparse: abstract sparse buffer allocations
  mm/hugetlb.c: don't zero 1GiB bootmem pages
  mm, page_alloc: double zone's batchsize
  mm/oom_kill.c: document oom_lock
  mm/hugetlb: remove gigantic page support for HIGHMEM
  mm, oom: remove sleep from under oom_lock
  kernel/dma: remove unsupported gfp_mask parameter from dma_alloc_from_contiguous()
  mm/cma: remove unsupported gfp_mask parameter from cma_alloc()
  ...
@@ -53,7 +53,7 @@
 	({ \
 		BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \
 				 _pfx "mask is not constant"); \
-		BUILD_BUG_ON_MSG(!(_mask), _pfx "mask is zero"); \
+		BUILD_BUG_ON_MSG((_mask) == 0, _pfx "mask is zero"); \
 		BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? \
 				 ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \
 				 _pfx "value too large for the field"); \
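
(Not part of the diff.) The checks above are the compile-time guards behind FIELD_PREP()/FIELD_GET(); the hunk only respells the "mask is zero" test. As a reminder of the API being guarded, a minimal sketch of a user follows; MY_REG_MODE and the two helpers are hypothetical, not from the patch:

    #include <linux/bitfield.h>
    #include <linux/bitops.h>

    #define MY_REG_MODE	GENMASK(3, 0)	/* hypothetical 4-bit register field */

    static u32 pack_mode(u32 reg, u32 mode)
    {
    	/* a non-constant or all-zero mask here trips the BUILD_BUG_ON_MSG()s above */
    	reg &= ~MY_REG_MODE;
    	return reg | FIELD_PREP(MY_REG_MODE, mode);
    }

    static u32 unpack_mode(u32 reg)
    {
    	return FIELD_GET(MY_REG_MODE, reg);	/* shifts the field back down to bit 0 */
    }
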
@@ -33,7 +33,7 @@ extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 					const char *name,
 					struct cma **res_cma);
 extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
-			      gfp_t gfp_mask);
+			      bool no_warn);
 extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
 
 extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
@@ -112,7 +112,7 @@ static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
 }
 
 struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
-				       unsigned int order, gfp_t gfp_mask);
+				       unsigned int order, bool no_warn);
 bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 				 int count);
 
@@ -145,7 +145,7 @@ int dma_declare_contiguous(struct device *dev, phys_addr_t size,
 
 static inline
 struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
-				       unsigned int order, gfp_t gfp_mask)
+				       unsigned int order, bool no_warn)
 {
 	return NULL;
 }
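
(Not part of the diff.) These hunks drop the gfp_t argument that cma_alloc() and dma_alloc_from_contiguous() never honoured beyond __GFP_NOWARN and replace it with an explicit bool. A caller conversion therefore looks roughly like the sketch below; the wrapper function is hypothetical:

    #include <linux/dma-contiguous.h>

    static struct page *demo_alloc_contiguous(struct device *dev, size_t count,
    					      unsigned int order, gfp_t gfp)
    {
    	/* before: dma_alloc_from_contiguous(dev, count, order, gfp);       */
    	/* after:  only the "don't warn on failure" intent is passed along  */
    	return dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
    }
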
@@ -179,7 +179,6 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
 #define ATTR_ATIME_SET	(1 << 7)
 #define ATTR_MTIME_SET	(1 << 8)
 #define ATTR_FORCE	(1 << 9) /* Not a change, but a change it */
-#define ATTR_ATTR_FLAG	(1 << 10)
 #define ATTR_KILL_SUID	(1 << 11)
 #define ATTR_KILL_SGID	(1 << 12)
 #define ATTR_FILE	(1 << 13)
@@ -345,6 +344,10 @@ struct address_space_operations {
 	/* Set a page dirty. Return true if this dirtied it */
 	int (*set_page_dirty)(struct page *page);
 
+	/*
+	 * Reads in the requested pages. Unlike ->readpage(), this is
+	 * PURELY used for read-ahead!.
+	 */
 	int (*readpages)(struct file *filp, struct address_space *mapping,
 			struct list_head *pages, unsigned nr_pages);
 
@@ -84,6 +84,8 @@ struct fsnotify_event_private_data;
 struct fsnotify_fname;
 struct fsnotify_iter_info;
 
+struct mem_cgroup;
+
 /*
  * Each group much define these ops. The fsnotify infrastructure will call
  * these operations for each relevant group.
@@ -127,6 +129,8 @@ struct fsnotify_event {
  * everything will be cleaned up.
  */
 struct fsnotify_group {
+	const struct fsnotify_ops *ops;	/* how this group handles things */
+
 	/*
 	 * How the refcnt is used is up to each group. When the refcnt hits 0
 	 * fsnotify will clean up all of the resources associated with this group.
@@ -137,8 +141,6 @@ struct fsnotify_group {
 	 */
 	refcount_t refcnt;		/* things with interest in this group */
 
-	const struct fsnotify_ops *ops;	/* how this group handles things */
-
 	/* needed to send notification to userspace */
 	spinlock_t notification_lock;		/* protect the notification_list */
 	struct list_head notification_list;	/* list of event_holder this group needs to send to userspace */
@@ -160,6 +162,8 @@ struct fsnotify_group {
 	atomic_t num_marks;		/* 1 for each mark and 1 for not being
 					 * past the point of no return when freeing
 					 * a group */
+	atomic_t user_waits;		/* Number of tasks waiting for user
+					 * response */
 	struct list_head marks_list;	/* all inode marks for this group */
 
 	struct fasync_struct *fsn_fa;	/* async notification */
@@ -167,8 +171,8 @@ struct fsnotify_group {
 	struct fsnotify_event *overflow_event;	/* Event we queue when the
 						 * notification list is too
 						 * full */
-	atomic_t user_waits;		/* Number of tasks waiting for user
-					 * response */
+
+	struct mem_cgroup *memcg;	/* memcg to charge allocations */
 
 	/* groups can define private fields here or use the void *private */
 	union {
@@ -348,9 +348,6 @@ struct hstate {
 struct huge_bootmem_page {
 	struct list_head list;
 	struct hstate *hstate;
-#ifdef CONFIG_HIGHMEM
-	phys_addr_t phys;
-#endif
 };
 
 struct page *alloc_huge_page(struct vm_area_struct *vma,
@@ -20,7 +20,7 @@ extern pmd_t kasan_zero_pmd[PTRS_PER_PMD];
 extern pud_t kasan_zero_pud[PTRS_PER_PUD];
 extern p4d_t kasan_zero_p4d[MAX_PTRS_PER_P4D];
 
-void kasan_populate_zero_shadow(const void *shadow_start,
+int kasan_populate_zero_shadow(const void *shadow_start,
 				const void *shadow_end);
 
 static inline void *kasan_mem_to_shadow(const void *addr)
@@ -71,6 +71,9 @@ struct kasan_cache {
 int kasan_module_alloc(void *addr, size_t size);
 void kasan_free_shadow(const struct vm_struct *vm);
 
+int kasan_add_zero_shadow(void *start, unsigned long size);
+void kasan_remove_zero_shadow(void *start, unsigned long size);
+
 size_t ksize(const void *);
 static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
 size_t kasan_metadata_size(struct kmem_cache *cache);
@@ -124,6 +127,14 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
 static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
 static inline void kasan_free_shadow(const struct vm_struct *vm) {}
 
+static inline int kasan_add_zero_shadow(void *start, unsigned long size)
+{
+	return 0;
+}
+static inline void kasan_remove_zero_shadow(void *start,
+					unsigned long size)
+{}
+
 static inline void kasan_unpoison_slab(const void *ptr) { }
 static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
 
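
(Not part of the diff.) kasan_add_zero_shadow()/kasan_remove_zero_shadow() let code that maps an additional physical range back the matching KASAN shadow with the zero page, and tear it down again on error or removal. A hedged sketch of the expected pairing; demo_map_range() and demo_do_mapping() are hypothetical:

    static int demo_do_mapping(void *start, unsigned long size);	/* hypothetical */

    static int demo_map_range(void *start, unsigned long size)
    {
    	int ret;

    	ret = kasan_add_zero_shadow(start, size);	/* 0 on success */
    	if (ret)
    		return ret;

    	ret = demo_do_mapping(start, size);
    	if (ret)
    		kasan_remove_zero_shadow(start, size);	/* undo on failure */
    	return ret;
    }
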
@@ -42,7 +42,7 @@ struct list_lru_node {
 	spinlock_t		lock;
 	/* global list, used for the root cgroup in cgroup aware lrus */
 	struct list_lru_one	lru;
-#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
+#ifdef CONFIG_MEMCG_KMEM
 	/* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
 	struct list_lru_memcg	__rcu *memcg_lrus;
 #endif
@@ -51,21 +51,25 @@ struct list_lru_node {
 
 struct list_lru {
 	struct list_lru_node	*node;
-#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
+#ifdef CONFIG_MEMCG_KMEM
 	struct list_head	list;
+	int			shrinker_id;
 #endif
 };
 
 void list_lru_destroy(struct list_lru *lru);
 int __list_lru_init(struct list_lru *lru, bool memcg_aware,
-		    struct lock_class_key *key);
+		    struct lock_class_key *key, struct shrinker *shrinker);
 
-#define list_lru_init(lru)		__list_lru_init((lru), false, NULL)
-#define list_lru_init_key(lru, key)	__list_lru_init((lru), false, (key))
-#define list_lru_init_memcg(lru)	__list_lru_init((lru), true, NULL)
+#define list_lru_init(lru)				\
+	__list_lru_init((lru), false, NULL, NULL)
+#define list_lru_init_key(lru, key)			\
+	__list_lru_init((lru), false, (key), NULL)
+#define list_lru_init_memcg(lru, shrinker)		\
+	__list_lru_init((lru), true, NULL, shrinker)
 
 int memcg_update_all_list_lrus(int num_memcgs);
-void memcg_drain_all_list_lrus(int src_idx, int dst_idx);
+void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg);
 
 /**
  * list_lru_add: add an element to the lru list's tail
@@ -162,6 +166,23 @@ unsigned long list_lru_walk_one(struct list_lru *lru,
 				int nid, struct mem_cgroup *memcg,
 				list_lru_walk_cb isolate, void *cb_arg,
 				unsigned long *nr_to_walk);
+/**
+ * list_lru_walk_one_irq: walk a list_lru, isolating and disposing freeable items.
+ * @lru: the lru pointer.
+ * @nid: the node id to scan from.
+ * @memcg: the cgroup to scan from.
+ * @isolate: callback function that is resposible for deciding what to do with
+ *  the item currently being scanned
+ * @cb_arg: opaque type that will be passed to @isolate
+ * @nr_to_walk: how many items to scan.
+ *
+ * Same as @list_lru_walk_one except that the spinlock is acquired with
+ * spin_lock_irq().
+ */
+unsigned long list_lru_walk_one_irq(struct list_lru *lru,
+				    int nid, struct mem_cgroup *memcg,
+				    list_lru_walk_cb isolate, void *cb_arg,
+				    unsigned long *nr_to_walk);
 unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
 				 list_lru_walk_cb isolate, void *cb_arg,
 				 unsigned long *nr_to_walk);
@@ -174,6 +195,14 @@ list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
 				  &sc->nr_to_scan);
 }
 
+static inline unsigned long
+list_lru_shrink_walk_irq(struct list_lru *lru, struct shrink_control *sc,
+			 list_lru_walk_cb isolate, void *cb_arg)
+{
+	return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg,
+				     &sc->nr_to_scan);
+}
+
 static inline unsigned long
 list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
 	      void *cb_arg, unsigned long nr_to_walk)
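
(Not part of the diff.) list_lru_shrink_walk_irq() is for shrinkers whose list_lru is also manipulated from a context that takes the per-node lock with spin_lock_irq(); the walk then uses the IRQ-disabling lock variant throughout. A hedged sketch of a scan callback built on it; demo_lru and the demo_* functions are hypothetical:

    static struct list_lru demo_lru;	/* hypothetical, initialised elsewhere */

    static enum lru_status demo_isolate(struct list_head *item,
    				    struct list_lru_one *list,
    				    spinlock_t *lock, void *cb_arg)
    {
    	/* a real callback would detach @item and free the containing object */
    	return LRU_SKIP;
    }

    static unsigned long demo_scan(struct shrinker *shrink,
    			       struct shrink_control *sc)
    {
    	return list_lru_shrink_walk_irq(&demo_lru, sc, demo_isolate, NULL);
    }
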
@@ -111,6 +111,15 @@ struct lruvec_stat {
 	long count[NR_VM_NODE_STAT_ITEMS];
 };
 
+/*
+ * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
+ * which have elements charged to this memcg.
+ */
+struct memcg_shrinker_map {
+	struct rcu_head rcu;
+	unsigned long map[0];
+};
+
 /*
  * per-zone information in memory controller.
  */
@@ -124,6 +133,9 @@ struct mem_cgroup_per_node {
 
 	struct mem_cgroup_reclaim_iter	iter[DEF_PRIORITY + 1];
 
+#ifdef CONFIG_MEMCG_KMEM
+	struct memcg_shrinker_map __rcu	*shrinker_map;
+#endif
 	struct rb_node		tree_node;	/* RB tree node */
 	unsigned long		usage_in_excess;/* Set to the value by which */
 						/* the soft limit is exceeded*/
@@ -271,7 +283,7 @@ struct mem_cgroup {
 	bool			tcpmem_active;
 	int			tcpmem_pressure;
 
-#ifndef CONFIG_SLOB
+#ifdef CONFIG_MEMCG_KMEM
 	/* Index in the kmem_cache->memcg_params.memcg_caches array */
 	int kmemcg_id;
 	enum memcg_kmem_state kmem_state;
@@ -306,6 +318,11 @@ struct mem_cgroup {
 
 extern struct mem_cgroup *root_mem_cgroup;
 
+static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
+{
+	return (memcg == root_mem_cgroup);
+}
+
 static inline bool mem_cgroup_disabled(void)
 {
 	return !cgroup_subsys_enabled(memory_cgrp_subsys);
@@ -373,11 +390,21 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);
 bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
 
+struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
+
+struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);
+
 static inline
 struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){
 	return css ? container_of(css, struct mem_cgroup, css) : NULL;
 }
 
+static inline void mem_cgroup_put(struct mem_cgroup *memcg)
+{
+	if (memcg)
+		css_put(&memcg->css);
+}
+
 #define mem_cgroup_from_counter(counter, member)	\
 	container_of(counter, struct mem_cgroup, member)
 
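
(Not part of the diff.) get_mem_cgroup_from_mm() and get_mem_cgroup_from_page() return a memcg with a css reference held; mem_cgroup_put() is the NULL-safe way to drop it. A hedged usage sketch, with a hypothetical caller:

    static void demo_inspect_owner(struct mm_struct *mm)
    {
    	struct mem_cgroup *memcg;

    	memcg = get_mem_cgroup_from_mm(mm);	/* css reference taken */
    	/* ... use memcg, e.g. as a remote charging target ... */
    	mem_cgroup_put(memcg);			/* NULL-safe release */
    }
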
@@ -497,16 +524,16 @@ unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
 				struct task_struct *p);
 
-static inline void mem_cgroup_oom_enable(void)
+static inline void mem_cgroup_enter_user_fault(void)
 {
-	WARN_ON(current->memcg_may_oom);
-	current->memcg_may_oom = 1;
+	WARN_ON(current->in_user_fault);
+	current->in_user_fault = 1;
 }
 
-static inline void mem_cgroup_oom_disable(void)
+static inline void mem_cgroup_exit_user_fault(void)
 {
-	WARN_ON(!current->memcg_may_oom);
-	current->memcg_may_oom = 0;
+	WARN_ON(!current->in_user_fault);
+	current->in_user_fault = 0;
 }
 
 static inline bool task_in_memcg_oom(struct task_struct *p)
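
(Not part of the diff.) The rename reflects what the flag actually tracks: whether the task is inside a userspace page fault, which is what allows the memcg OOM killer to be invoked from the charge path. The fault path brackets the real work roughly as sketched below; demo_fault_core() stands in for the fault handling proper and is hypothetical:

    static int demo_fault_core(struct vm_area_struct *vma,
    			   unsigned long address, unsigned int flags);	/* hypothetical */

    static int demo_handle_fault(struct vm_area_struct *vma,
    			     unsigned long address, unsigned int flags)
    {
    	int ret;

    	if (flags & FAULT_FLAG_USER)
    		mem_cgroup_enter_user_fault();

    	ret = demo_fault_core(vma, address, flags);

    	if (flags & FAULT_FLAG_USER)
    		mem_cgroup_exit_user_fault();

    	return ret;
    }
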
@@ -762,6 +789,11 @@ void mem_cgroup_split_huge_fixup(struct page *head);
 
 struct mem_cgroup;
 
+static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
+{
+	return true;
+}
+
 static inline bool mem_cgroup_disabled(void)
 {
 	return true;
@@ -850,6 +882,20 @@ static inline bool task_in_mem_cgroup(struct task_struct *task,
 	return true;
 }
 
+static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
+{
+	return NULL;
+}
+
+static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
+{
+	return NULL;
+}
+
+static inline void mem_cgroup_put(struct mem_cgroup *memcg)
+{
+}
+
 static inline struct mem_cgroup *
 mem_cgroup_iter(struct mem_cgroup *root,
 		struct mem_cgroup *prev,
@@ -937,11 +983,11 @@ static inline void mem_cgroup_handle_over_high(void)
 {
 }
 
-static inline void mem_cgroup_oom_enable(void)
+static inline void mem_cgroup_enter_user_fault(void)
 {
 }
 
-static inline void mem_cgroup_oom_disable(void)
+static inline void mem_cgroup_exit_user_fault(void)
 {
 }
 
@@ -1207,7 +1253,7 @@ int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
 int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
 void memcg_kmem_uncharge(struct page *page, int order);
 
-#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
+#ifdef CONFIG_MEMCG_KMEM
 extern struct static_key_false memcg_kmem_enabled_key;
 extern struct workqueue_struct *memcg_kmem_cache_wq;
 
@@ -1238,6 +1284,10 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
 	return memcg ? memcg->kmemcg_id : -1;
 }
 
+extern int memcg_expand_shrinker_maps(int new_id);
+
+extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
+				   int nid, int shrinker_id);
 #else
 #define for_each_memcg_cache_index(_idx)	\
 	for (; NULL; )
@@ -1260,6 +1310,8 @@ static inline void memcg_put_cache_ids(void)
 {
 }
 
-#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
+static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
+					  int nid, int shrinker_id) { }
+#endif /* CONFIG_MEMCG_KMEM */
 
 #endif /* _LINUX_MEMCONTROL_H */
@@ -2665,12 +2665,7 @@ extern int randomize_va_space;
 const char * arch_vma_name(struct vm_area_struct *vma);
 void print_vma_addr(char *prefix, unsigned long rip);
 
-void sparse_mem_maps_populate_node(struct page **map_map,
-				   unsigned long pnum_begin,
-				   unsigned long pnum_end,
-				   unsigned long map_count,
-				   int nodeid);
-
+void *sparse_buffer_alloc(unsigned long size);
 struct page *sparse_mem_map_populate(unsigned long pnum, int nid,
 		struct vmem_altmap *altmap);
 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
@@ -2752,7 +2747,8 @@ extern void clear_huge_page(struct page *page,
 			    unsigned long addr_hint,
 			    unsigned int pages_per_huge_page);
 extern void copy_user_huge_page(struct page *dst, struct page *src,
-				unsigned long addr, struct vm_area_struct *vma,
+				unsigned long addr_hint,
+				struct vm_area_struct *vma,
 				unsigned int pages_per_huge_page);
 extern long copy_huge_page_from_user(struct page *dst_page,
 				const void __user *usr_src,
@@ -33,10 +33,10 @@ typedef void (*node_registration_func_t)(struct node *);
 
 #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA)
 extern int link_mem_sections(int nid, unsigned long start_pfn,
-			     unsigned long nr_pages, bool check_nid);
+			     unsigned long end_pfn);
 #else
 static inline int link_mem_sections(int nid, unsigned long start_pfn,
-				    unsigned long nr_pages, bool check_nid)
+				    unsigned long end_pfn)
 {
 	return 0;
 }
@@ -54,12 +54,14 @@ static inline int register_one_node(int nid)
 
 	if (node_online(nid)) {
 		struct pglist_data *pgdat = NODE_DATA(nid);
+		unsigned long start_pfn = pgdat->node_start_pfn;
+		unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
 
 		error = __register_one_node(nid);
 		if (error)
 			return error;
 		/* link memory sections under this node */
-		error = link_mem_sections(nid, pgdat->node_start_pfn, pgdat->node_spanned_pages, true);
+		error = link_mem_sections(nid, start_pfn, end_pfn);
 	}
 
 	return error;
@@ -69,7 +71,7 @@ extern void unregister_one_node(int nid);
 extern int register_cpu_under_node(unsigned int cpu, unsigned int nid);
 extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
 extern int register_mem_sect_under_node(struct memory_block *mem_blk,
-					 int nid, bool check_nid);
+					 void *arg);
 extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
 					   unsigned long phys_index);
 
@@ -99,7 +101,7 @@ static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
 	return 0;
 }
 static inline int register_mem_sect_under_node(struct memory_block *mem_blk,
						int nid, bool check_nid)
-						int nid, bool check_nid)
+						void *arg)
 {
 	return 0;
 }
@@ -16,18 +16,7 @@ struct page_ext_operations {
 
 #ifdef CONFIG_PAGE_EXTENSION
 
-/*
- * page_ext->flags bits:
- *
- * PAGE_EXT_DEBUG_POISON is set for poisoned pages. This is used to
- * implement generic debug pagealloc feature. The pages are filled with
- * poison patterns and set this flag after free_pages(). The poisoned
- * pages are verified whether the patterns are not corrupted and clear
- * the flag before alloc_pages().
- */
-
 enum page_ext_flags {
-	PAGE_EXT_DEBUG_POISON,		/* Page is poisoned */
 	PAGE_EXT_DEBUG_GUARD,
 	PAGE_EXT_OWNER,
 #if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
@@ -61,7 +50,7 @@ static inline void page_ext_init(void)
 }
 #endif
 
-struct page_ext *lookup_page_ext(struct page *page);
+struct page_ext *lookup_page_ext(const struct page *page);
 
 #else /* !CONFIG_PAGE_EXTENSION */
 struct page_ext;
@@ -70,7 +59,7 @@ static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
 {
 }
 
-static inline struct page_ext *lookup_page_ext(struct page *page)
+static inline struct page_ext *lookup_page_ext(const struct page *page)
 {
 	return NULL;
 }
@@ -722,8 +722,8 @@ struct task_struct {
 	unsigned			restore_sigmask:1;
 #endif
 #ifdef CONFIG_MEMCG
-	unsigned			memcg_may_oom:1;
-#ifndef CONFIG_SLOB
+	unsigned			in_user_fault:1;
+#ifdef CONFIG_MEMCG_KMEM
 	unsigned			memcg_kmem_skip_account:1;
 #endif
 #endif
@@ -1152,6 +1152,9 @@ struct task_struct {
 
 	/* Number of pages to reclaim on returning to userland: */
 	unsigned int			memcg_nr_pages_over_high;
+
+	/* Used by memcontrol for targeted memcg charge: */
+	struct mem_cgroup		*active_memcg;
 #endif
 
 #ifdef CONFIG_BLK_CGROUP
@@ -248,6 +248,43 @@ static inline void memalloc_noreclaim_restore(unsigned int flags)
 	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
 }
 
+#ifdef CONFIG_MEMCG
+/**
+ * memalloc_use_memcg - Starts the remote memcg charging scope.
+ * @memcg: memcg to charge.
+ *
+ * This function marks the beginning of the remote memcg charging scope. All the
+ * __GFP_ACCOUNT allocations till the end of the scope will be charged to the
+ * given memcg.
+ *
+ * NOTE: This function is not nesting safe.
+ */
+static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
+{
+	WARN_ON_ONCE(current->active_memcg);
+	current->active_memcg = memcg;
+}
+
+/**
+ * memalloc_unuse_memcg - Ends the remote memcg charging scope.
+ *
+ * This function marks the end of the remote memcg charging scope started by
+ * memalloc_use_memcg().
+ */
+static inline void memalloc_unuse_memcg(void)
+{
+	current->active_memcg = NULL;
+}
+#else
+static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
+{
+}
+
+static inline void memalloc_unuse_memcg(void)
+{
+}
+#endif
+
 #ifdef CONFIG_MEMBARRIER
 enum {
 	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY	= (1U << 0),
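
(Not part of the diff.) Per the kerneldoc above, the use/unuse pair redirects all __GFP_ACCOUNT allocations inside the scope to the given memcg instead of the current task's own. A hedged usage sketch; the wrapper function is hypothetical:

    static void *demo_alloc_for_mm(struct mm_struct *mm, size_t size)
    {
    	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);	/* css ref held */
    	void *buf;

    	memalloc_use_memcg(memcg);
    	buf = kmalloc(size, GFP_KERNEL | __GFP_ACCOUNT);	/* charged to memcg */
    	memalloc_unuse_memcg();

    	mem_cgroup_put(memcg);
    	return buf;
    }
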
@@ -34,12 +34,15 @@ struct shrink_control {
 };
 
 #define SHRINK_STOP (~0UL)
+#define SHRINK_EMPTY (~0UL - 1)
 /*
  * A callback you can register to apply pressure to ageable caches.
  *
  * @count_objects should return the number of freeable items in the cache. If
- * there are no objects to free or the number of freeable items cannot be
- * determined, it should return 0. No deadlock checks should be done during the
+ * there are no objects to free, it should return SHRINK_EMPTY, while 0 is
+ * returned in cases of the number of freeable items cannot be determined
+ * or shrinker should skip this cache for this time (e.g., their number
+ * is below shrinkable limit). No deadlock checks should be done during the
  * count callback - the shrinker relies on aggregating scan counts that couldn't
  * be executed due to potential deadlocks to be run at a later call when the
  * deadlock condition is no longer pending.
@@ -66,6 +69,10 @@ struct shrinker {
 
 	/* These are for internal use */
 	struct list_head list;
+#ifdef CONFIG_MEMCG_KMEM
+	/* ID in shrinker_idr */
+	int id;
+#endif
 	/* objs pending delete, per node */
 	atomic_long_t *nr_deferred;
 };
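
(Not part of the diff.) SHRINK_EMPTY lets a memcg-aware ->count_objects distinguish "this cache has nothing charged to the memcg being scanned" from "could not tell / skip me this time" (plain 0), so the corresponding bit in the per-memcg shrinker map can be cleared. A hedged sketch; demo_lru is hypothetical:

    static struct list_lru demo_lru;	/* hypothetical, memcg-aware */

    static unsigned long demo_count(struct shrinker *shrink,
    				struct shrink_control *sc)
    {
    	unsigned long freeable = list_lru_shrink_count(&demo_lru, sc);

    	return freeable ? freeable : SHRINK_EMPTY;
    }
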
@@ -97,7 +97,7 @@
 # define SLAB_FAILSLAB		0
 #endif
 /* Account to memcg */
-#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
+#ifdef CONFIG_MEMCG_KMEM
 # define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
 #else
 # define SLAB_ACCOUNT		0
@@ -5,12 +5,6 @@
 #include <linux/sched.h>
 #include <linux/mm.h>
 
-/*
- * Hash based on the page number. Provides a good hit rate for
- * workloads with good locality and those with random accesses as well.
- */
-#define VMACACHE_HASH(addr) ((addr >> PAGE_SHIFT) & VMACACHE_MASK)
-
 static inline void vmacache_flush(struct task_struct *tsk)
 {
 	memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));