Merge branch 'akpm' (incoming from Andrew Morton)
Merge misc fixes from Andrew Morton:
 "21 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (21 commits)
  mm/balloon_compaction: fix deflation when compaction is disabled
  sh: fix sh770x SCIF memory regions
  zram: avoid NULL pointer access in concurrent situation
  mm/slab_common: don't check for duplicate cache names
  ocfs2: fix d_splice_alias() return code checking
  mm: rmap: split out page_remove_file_rmap()
  mm: memcontrol: fix missed end-writeback page accounting
  mm: page-writeback: inline account_page_dirtied() into single caller
  lib/bitmap.c: fix undefined shift in __bitmap_shift_{left|right}()
  drivers/rtc/rtc-bq32k.c: fix register value
  memory-hotplug: clear pgdat which is allocated by bootmem in try_offline_node()
  drivers/rtc/rtc-s3c.c: fix initialization failure without rtc source clock
  kernel/kmod: fix use-after-free of the sub_info structure
  drivers/rtc/rtc-pm8xxx.c: rework to support pm8941 rtc
  mm, thp: fix collapsing of hugepages on madvise
  drivers: of: add return value to of_reserved_mem_device_init()
  mm: free compound page with correct order
  gcov: add ARM64 to GCOV_PROFILE_ALL
  fsnotify: next_i is freed during fsnotify_unmount_inodes.
  mm/compaction.c: avoid premature range skip in isolate_migratepages_range
  ...
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -6,7 +6,8 @@
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern int __khugepaged_enter(struct mm_struct *mm);
 extern void __khugepaged_exit(struct mm_struct *mm);
-extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma);
+extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
+				      unsigned long vm_flags);
 
 #define khugepaged_enabled()					       \
 	(transparent_hugepage_flags &				       \
@@ -35,13 +36,13 @@ static inline void khugepaged_exit(struct mm_struct *mm)
 		__khugepaged_exit(mm);
 }
 
-static inline int khugepaged_enter(struct vm_area_struct *vma)
+static inline int khugepaged_enter(struct vm_area_struct *vma,
+				   unsigned long vm_flags)
 {
 	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
 		if ((khugepaged_always() ||
-		     (khugepaged_req_madv() &&
-		      vma->vm_flags & VM_HUGEPAGE)) &&
-		    !(vma->vm_flags & VM_NOHUGEPAGE))
+		     (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
+		    !(vm_flags & VM_NOHUGEPAGE))
 			if (__khugepaged_enter(vma->vm_mm))
 				return -ENOMEM;
 	return 0;
@@ -54,11 +55,13 @@ static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 static inline void khugepaged_exit(struct mm_struct *mm)
 {
 }
-static inline int khugepaged_enter(struct vm_area_struct *vma)
+static inline int khugepaged_enter(struct vm_area_struct *vma,
+				   unsigned long vm_flags)
 {
 	return 0;
 }
-static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
+static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
+					     unsigned long vm_flags)
 {
 	return 0;
 }
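The khugepaged change above ("mm, thp: fix collapsing of hugepages on madvise") makes callers pass the VM flags they are about to install instead of relying on vma->vm_flags, which may not yet reflect a pending madvise(MADV_HUGEPAGE). A minimal caller sketch follows; the function name is hypothetical and only the khugepaged_enter() signature is taken from the hunk above.

/*
 * Illustrative sketch only: register the mm with khugepaged using the *new*
 * flags being applied, not the stale vma->vm_flags.
 */
static int example_madvise_hugepage(struct vm_area_struct *vma,
				    unsigned long *vm_flags)
{
	*vm_flags &= ~VM_NOHUGEPAGE;
	*vm_flags |= VM_HUGEPAGE;
	/* vma->vm_flags is not updated yet, so pass the new value explicitly */
	return khugepaged_enter(vma, *vm_flags);
}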
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -139,48 +139,23 @@ static inline bool mem_cgroup_disabled(void)
 	return false;
 }
 
-void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
-					 unsigned long *flags);
-
-extern atomic_t memcg_moving;
-
-static inline void mem_cgroup_begin_update_page_stat(struct page *page,
-					bool *locked, unsigned long *flags)
-{
-	if (mem_cgroup_disabled())
-		return;
-	rcu_read_lock();
-	*locked = false;
-	if (atomic_read(&memcg_moving))
-		__mem_cgroup_begin_update_page_stat(page, locked, flags);
-}
-
-void __mem_cgroup_end_update_page_stat(struct page *page,
-				unsigned long *flags);
-static inline void mem_cgroup_end_update_page_stat(struct page *page,
-					bool *locked, unsigned long *flags)
-{
-	if (mem_cgroup_disabled())
-		return;
-	if (*locked)
-		__mem_cgroup_end_update_page_stat(page, flags);
-	rcu_read_unlock();
-}
-
-void mem_cgroup_update_page_stat(struct page *page,
-				 enum mem_cgroup_stat_index idx,
-				 int val);
+struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked,
+					      unsigned long *flags);
+void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked,
+			      unsigned long flags);
+void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
+				 enum mem_cgroup_stat_index idx, int val);
 
-static inline void mem_cgroup_inc_page_stat(struct page *page,
+static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
 					    enum mem_cgroup_stat_index idx)
 {
-	mem_cgroup_update_page_stat(page, idx, 1);
+	mem_cgroup_update_page_stat(memcg, idx, 1);
 }
 
-static inline void mem_cgroup_dec_page_stat(struct page *page,
+static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
 					    enum mem_cgroup_stat_index idx)
 {
-	mem_cgroup_update_page_stat(page, idx, -1);
+	mem_cgroup_update_page_stat(memcg, idx, -1);
 }
 
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
@@ -315,13 +290,14 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
 }
 
-static inline void mem_cgroup_begin_update_page_stat(struct page *page,
+static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
 					bool *locked, unsigned long *flags)
 {
+	return NULL;
 }
 
-static inline void mem_cgroup_end_update_page_stat(struct page *page,
-					bool *locked, unsigned long *flags)
+static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg,
+					bool locked, unsigned long flags)
 {
 }
 
@@ -343,12 +319,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
 	return false;
 }
 
-static inline void mem_cgroup_inc_page_stat(struct page *page,
+static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
 					    enum mem_cgroup_stat_index idx)
 {
 }
 
-static inline void mem_cgroup_dec_page_stat(struct page *page,
+static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
 					    enum mem_cgroup_stat_index idx)
 {
 }
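The memcontrol hunks above ("mm: memcontrol: fix missed end-writeback page accounting") rework the page-stat locking: mem_cgroup_begin_page_stat() now returns the memcg to charge, and every update and the matching end call take that memcg directly instead of re-deriving it from the page. A rough usage sketch follows; the function name is invented, MEM_CGROUP_STAT_FILE_MAPPED is the existing stat index for mapped file pages, and only the begin/update/end signatures come from the hunks above.

/*
 * Illustrative sketch of the reworked API: hold the page-stat lock around
 * both the state change and the counter update, using the returned memcg.
 */
static void example_unmap_file_page(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long flags;
	bool locked;

	memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
	if (atomic_add_negative(-1, &page->_mapcount))
		mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
	mem_cgroup_end_page_stat(memcg, locked, flags);
}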
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1235,7 +1235,6 @@ int __set_page_dirty_no_writeback(struct page *page);
 int redirty_page_for_writepage(struct writeback_control *wbc,
 				struct page *page);
 void account_page_dirtied(struct page *page, struct address_space *mapping);
-void account_page_writeback(struct page *page);
 int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
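account_page_writeback() disappears from the public header; per the series above, the writeback counters are presumably bumped directly at the single place that sets the writeback tag, under the memcg page-stat lock. A hedged sketch of what such a caller might look like; the wrapper function is hypothetical, and MEM_CGROUP_STAT_WRITEBACK is assumed to be the writeback stat index introduced by the memcontrol fix in this merge.

/*
 * Hypothetical caller sketch: with account_page_writeback() removed, the
 * accounting is assumed to be open-coded where the writeback bit is set.
 */
static void example_mark_writeback(struct page *page, int was_set,
				   struct mem_cgroup *memcg)
{
	if (!was_set) {
		mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
		inc_zone_page_state(page, NR_WRITEBACK);
	}
}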
--- a/include/linux/of_reserved_mem.h
+++ b/include/linux/of_reserved_mem.h
@@ -16,7 +16,7 @@ struct reserved_mem {
 };
 
 struct reserved_mem_ops {
-	void	(*device_init)(struct reserved_mem *rmem,
+	int	(*device_init)(struct reserved_mem *rmem,
 			       struct device *dev);
 	void	(*device_release)(struct reserved_mem *rmem,
 				  struct device *dev);
@@ -28,14 +28,17 @@ typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem);
 	_OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn)
 
 #ifdef CONFIG_OF_RESERVED_MEM
-void of_reserved_mem_device_init(struct device *dev);
+int of_reserved_mem_device_init(struct device *dev);
 void of_reserved_mem_device_release(struct device *dev);
 
 void fdt_init_reserved_mem(void);
 void fdt_reserved_mem_save_node(unsigned long node, const char *uname,
 			       phys_addr_t base, phys_addr_t size);
 #else
-static inline void of_reserved_mem_device_init(struct device *dev) { }
+static inline int of_reserved_mem_device_init(struct device *dev)
+{
+	return -ENOSYS;
+}
 static inline void of_reserved_mem_device_release(struct device *pdev) { }
 
 static inline void fdt_init_reserved_mem(void) { }
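With of_reserved_mem_device_init() now returning an int ("drivers: of: add return value to of_reserved_mem_device_init()"), a driver can tell whether its reserved-memory region was actually attached and act on failure. A minimal probe sketch follows; the driver and probe function are hypothetical, and only the of_reserved_mem_device_init() signature and its -ENOSYS stub come from the header above.

/*
 * Hypothetical probe: treat -ENOSYS (reserved-mem support compiled out) as
 * "no region", but propagate real attach failures.
 */
static int example_probe(struct platform_device *pdev)
{
	int ret;

	ret = of_reserved_mem_device_init(&pdev->dev);
	if (ret && ret != -ENOSYS)
		return ret;	/* region declared in DT but init failed */

	/* ... normal device setup ... */
	return 0;
}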