Merge branch 'akpm' (more incoming from Andrew)
Merge second patch-bomb from Andrew Morton:
 - A little DM fix
 - the MM queue

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (154 commits)
  ksm: allocate roots when needed
  mm: cleanup "swapcache" in do_swap_page
  mm,ksm: swapoff might need to copy
  mm,ksm: FOLL_MIGRATION do migration_entry_wait
  ksm: shrink 32-bit rmap_item back to 32 bytes
  ksm: treat unstable nid like in stable tree
  ksm: add some comments
  tmpfs: fix mempolicy object leaks
  tmpfs: fix use-after-free of mempolicy object
  mm/fadvise.c: drain all pagevecs if POSIX_FADV_DONTNEED fails to discard all pages
  mm: export mmu notifier invalidates
  mm: accelerate mm_populate() treatment of THP pages
  mm: use long type for page counts in mm_populate() and get_user_pages()
  mm: accurately document nr_free_*_pages functions with code comments
  HWPOISON: change order of error_states[]'s elements
  HWPOISON: fix misjudgement of page_action() for errors on mlocked pages
  memcg: stop warning on memcg_propagate_kmem
  net: change type of virtio_chan->p9_max_pages
  vmscan: change type of vm_total_pages to unsigned long
  fs/nfsd: change type of max_delegations, nfsd_drc_max_mem and nfsd_drc_mem_used
  ...
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -485,6 +485,14 @@ static inline bool acpi_driver_match_device(struct device *dev,
 
 #endif	/* !CONFIG_ACPI */
 
+#ifdef CONFIG_ACPI_NUMA
+void __init early_parse_srat(void);
+#else
+static inline void early_parse_srat(void)
+{
+}
+#endif
+
 #ifdef CONFIG_ACPI
 void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
 			u32 pm1a_ctrl, u32 pm1b_ctrl));
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -53,6 +53,7 @@ extern void free_bootmem_node(pg_data_t *pgdat,
 			      unsigned long size);
 extern void free_bootmem(unsigned long physaddr, unsigned long size);
 extern void free_bootmem_late(unsigned long physaddr, unsigned long size);
+extern void __free_pages_bootmem(struct page *page, unsigned int order);
 
 /*
  * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE,
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -23,7 +23,7 @@ extern int fragmentation_index(struct zone *zone, unsigned int order);
 extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
 			int order, gfp_t gfp_mask, nodemask_t *mask,
 			bool sync, bool *contended);
-extern int compact_pgdat(pg_data_t *pgdat, int order);
+extern void compact_pgdat(pg_data_t *pgdat, int order);
 extern void reset_isolation_suitable(pg_data_t *pgdat);
 extern unsigned long compaction_suitable(struct zone *zone, int order);
 
@@ -80,9 +80,8 @@ static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
 	return COMPACT_CONTINUE;
 }
 
-static inline int compact_pgdat(pg_data_t *pgdat, int order)
+static inline void compact_pgdat(pg_data_t *pgdat, int order)
 {
-	return COMPACT_CONTINUE;
 }
 
 static inline void reset_isolation_suitable(pg_data_t *pgdat)
--- a/include/linux/firmware-map.h
+++ b/include/linux/firmware-map.h
@@ -25,6 +25,7 @@
 
 int firmware_map_add_early(u64 start, u64 end, const char *type);
 int firmware_map_add_hotplug(u64 start, u64 end, const char *type);
+int firmware_map_remove(u64 start, u64 end, const char *type);
 
 #else /* CONFIG_FIRMWARE_MEMMAP */
 
@@ -38,6 +39,11 @@ static inline int firmware_map_add_hotplug(u64 start, u64 end, const char *type)
 	return 0;
 }
 
+static inline int firmware_map_remove(u64 start, u64 end, const char *type)
+{
+	return 0;
+}
+
 #endif /* CONFIG_FIRMWARE_MEMMAP */
 
 #endif /* _LINUX_FIRMWARE_MAP_H */
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -219,12 +219,6 @@ static inline void zero_user(struct page *page,
 	zero_user_segments(page, start, start + size, 0, 0);
 }
 
-static inline void __deprecated memclear_highpage_flush(struct page *page,
-			unsigned int offset, unsigned int size)
-{
-	zero_user(page, offset, size);
-}
-
 #ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
 
 static inline void copy_user_highpage(struct page *to, struct page *from,
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -113,7 +113,7 @@ extern void __split_huge_page_pmd(struct vm_area_struct *vma,
 	do {						\
 		pmd_t *____pmd = (__pmd);		\
 		anon_vma_lock_write(__anon_vma);	\
-		anon_vma_unlock(__anon_vma);		\
+		anon_vma_unlock_write(__anon_vma);	\
 		BUG_ON(pmd_trans_splitting(*____pmd) ||	\
 		       pmd_trans_huge(*____pmd));	\
 	} while (0)
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -43,9 +43,9 @@ int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
 #endif
 
 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
-int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
-			struct page **, struct vm_area_struct **,
-			unsigned long *, int *, int, unsigned int flags);
+long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
+			 struct page **, struct vm_area_struct **,
+			 unsigned long *, unsigned long *, long, unsigned int);
 void unmap_hugepage_range(struct vm_area_struct *,
 			  unsigned long, unsigned long, struct page *);
 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -16,9 +16,6 @@
 struct stable_node;
 struct mem_cgroup;
 
-struct page *ksm_does_need_to_copy(struct page *page,
-			struct vm_area_struct *vma, unsigned long address);
-
 #ifdef CONFIG_KSM
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 		unsigned long end, int advice, unsigned long *vm_flags);
@@ -73,15 +70,8 @@ static inline void set_page_stable_node(struct page *page,
  * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
  * but what if the vma was unmerged while the page was swapped out?
  */
-static inline int ksm_might_need_to_copy(struct page *page,
-			struct vm_area_struct *vma, unsigned long address)
-{
-	struct anon_vma *anon_vma = page_anon_vma(page);
-
-	return anon_vma &&
-		(anon_vma->root != vma->anon_vma->root ||
-		 page->index != linear_page_index(vma, address));
-}
+struct page *ksm_might_need_to_copy(struct page *page,
+			struct vm_area_struct *vma, unsigned long address);
 
 int page_referenced_ksm(struct page *page,
 			struct mem_cgroup *memcg, unsigned long *vm_flags);
@@ -113,10 +103,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 	return 0;
 }
 
-static inline int ksm_might_need_to_copy(struct page *page,
+static inline struct page *ksm_might_need_to_copy(struct page *page,
 			struct vm_area_struct *vma, unsigned long address)
 {
-	return 0;
+	return page;
 }
 
 static inline int page_referenced_ksm(struct page *page,
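The ksm_might_need_to_copy() conversion above inverts the helper's contract: instead of telling the caller that a copy is needed, it now performs the copy itself and returns the page to use (the !CONFIG_KSM stub simply hands the original back). A hedged caller-side sketch, loosely modeled on the do_swap_page() usage this series introduces; swapin_fixup_ksm() is a made-up name, not part of the diff:

#include <linux/ksm.h>
#include <linux/mm.h>

static int swapin_fixup_ksm(struct page **pagep, struct vm_area_struct *vma,
			    unsigned long address)
{
	struct page *swapcache = *pagep;
	struct page *page = ksm_might_need_to_copy(swapcache, vma, address);

	if (unlikely(!page))
		return -ENOMEM;	/* a private copy was needed but allocation failed */
	/*
	 * page == swapcache means the swapcache page can be mapped directly;
	 * otherwise a freshly copied page came back and the swapcache page
	 * must be left alone for the other users of that swap slot.
	 */
	*pagep = page;
	return 0;
}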
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -42,6 +42,7 @@ struct memblock {
 
 extern struct memblock memblock;
 extern int memblock_debug;
+extern struct movablemem_map movablemem_map;
 
 #define memblock_dbg(fmt, ...) \
 	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
@@ -60,6 +61,7 @@ int memblock_reserve(phys_addr_t base, phys_addr_t size);
 void memblock_trim_memory(phys_addr_t align);
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 
 void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			   unsigned long *out_end_pfn, int *out_nid);
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -116,7 +116,6 @@ void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
  * For memory reclaim.
  */
 int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
-int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec);
 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
 unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
 void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
@@ -321,12 +320,6 @@ mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
 	return 1;
 }
 
-static inline int
-mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
-{
-	return 1;
-}
-
 static inline unsigned long
 mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 {
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -96,6 +96,7 @@ extern void __online_page_free(struct page *page);
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
 extern bool is_pageblock_removable_nolock(struct page *page);
+extern int arch_remove_memory(u64 start, u64 size);
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 
 /* reasonably generic interface to expand the physical pages in a zone */
@@ -173,17 +174,16 @@ static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
 #endif /* CONFIG_NUMA */
 #endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
 
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
+#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
+extern void register_page_bootmem_info_node(struct pglist_data *pgdat);
+#else
 static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
 {
 }
-static inline void put_page_bootmem(struct page *page)
-{
-}
-#else
-extern void register_page_bootmem_info_node(struct pglist_data *pgdat);
-extern void put_page_bootmem(struct page *page);
 #endif
+extern void put_page_bootmem(struct page *page);
 extern void get_page_bootmem(unsigned long ingo, struct page *page,
 			     unsigned long type);
 
 /*
  * Lock for memory hotplug guarantees 1) all callbacks for memory hotplug
@@ -233,6 +233,7 @@ static inline void unlock_memory_hotplug(void) {}
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
 extern int is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
+extern void try_offline_node(int nid);
 
 #else
 static inline int is_mem_section_removable(unsigned long pfn,
@@ -240,6 +241,8 @@ static inline int is_mem_section_removable(unsigned long pfn,
 {
 	return 0;
 }
+
+static inline void try_offline_node(int nid) {}
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 
 extern int mem_online_node(int nid);
@@ -247,7 +250,8 @@ extern int add_memory(int nid, u64 start, u64 size);
 extern int arch_add_memory(int nid, u64 start, u64 size);
 extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
 extern int offline_memory_block(struct memory_block *mem);
-extern int remove_memory(u64 start, u64 size);
+extern bool is_memblock_offlined(struct memory_block *mem);
+extern int remove_memory(int nid, u64 start, u64 size);
 extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
 								int nr_pages);
 extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms);
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -40,11 +40,9 @@ extern void putback_movable_pages(struct list_head *l);
 extern int migrate_page(struct address_space *,
 			struct page *, struct page *, enum migrate_mode);
 extern int migrate_pages(struct list_head *l, new_page_t x,
-		unsigned long private, bool offlining,
-		enum migrate_mode mode, int reason);
+		unsigned long private, enum migrate_mode mode, int reason);
 extern int migrate_huge_page(struct page *, new_page_t x,
-		unsigned long private, bool offlining,
-		enum migrate_mode mode);
+		unsigned long private, enum migrate_mode mode);
 
 extern int fail_migrate_page(struct address_space *,
 			struct page *, struct page *);
@@ -62,11 +60,11 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
 static inline void putback_lru_pages(struct list_head *l) {}
 static inline void putback_movable_pages(struct list_head *l) {}
 static inline int migrate_pages(struct list_head *l, new_page_t x,
-		unsigned long private, bool offlining,
-		enum migrate_mode mode, int reason) { return -ENOSYS; }
+		unsigned long private, enum migrate_mode mode, int reason)
+	{ return -ENOSYS; }
 static inline int migrate_huge_page(struct page *page, new_page_t x,
-		unsigned long private, bool offlining,
-		enum migrate_mode mode) { return -ENOSYS; }
+		unsigned long private, enum migrate_mode mode)
+	{ return -ENOSYS; }
 
 static inline int migrate_prep(void) { return -ENOSYS; }
 static inline int migrate_prep_local(void) { return -ENOSYS; }
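With the offlining argument gone, every migrate_pages() caller now passes just a target-page allocator, an opaque private value, a migrate_mode and a tracing reason. A hedged sketch of a caller under the new signature; new_page_for_test() and migrate_list() are invented names, and a real caller would typically allocate the destination on a chosen node rather than with plain alloc_pages():

#include <linux/migrate.h>
#include <linux/gfp.h>

/* new_page_t callback: hand back a destination page for each source page */
static struct page *new_page_for_test(struct page *page,
				      unsigned long private, int **result)
{
	return alloc_pages(GFP_HIGHUSER_MOVABLE, 0);
}

static int migrate_list(struct list_head *pagelist)
{
	/* MIGRATE_SYNC: wait on writeback; MR_MEMORY_HOTPLUG: reason for tracing */
	return migrate_pages(pagelist, new_page_for_test, 0,
			     MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
}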
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -87,6 +87,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
 #define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
 
+#define VM_POPULATE	0x00001000
 #define VM_LOCKED	0x00002000
 #define VM_IO		0x00004000	/* Memory mapped I/O or similar */
 
@@ -366,7 +367,7 @@ static inline struct page *compound_head(struct page *page)
  * both from it and to it can be tracked, using atomic_inc_and_test
  * and atomic_add_negative(-1).
  */
-static inline void reset_page_mapcount(struct page *page)
+static inline void page_mapcount_reset(struct page *page)
 {
 	atomic_set(&(page)->_mapcount, -1);
 }
@@ -580,50 +581,11 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
  * sets it, so none of the operations on it need to be atomic.
  */
 
-
-/*
- * page->flags layout:
- *
- * There are three possibilities for how page->flags get
- * laid out. The first is for the normal case, without
- * sparsemem. The second is for sparsemem when there is
- * plenty of space for node and section. The last is when
- * we have run out of space and have to fall back to an
- * alternate (slower) way of determining the node.
- *
- * No sparsemem or sparsemem vmemmap: |       NODE     | ZONE | ... | FLAGS |
- * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS |
- * classic sparse no space for node: | SECTION |     ZONE    | ... | FLAGS |
- */
-#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
-#define SECTIONS_WIDTH		SECTIONS_SHIFT
-#else
-#define SECTIONS_WIDTH		0
-#endif
-
-#define ZONES_WIDTH		ZONES_SHIFT
-
-#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
-#define NODES_WIDTH		NODES_SHIFT
-#else
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
-#error "Vmemmap: No space for nodes field in page flags"
-#endif
-#define NODES_WIDTH		0
-#endif
-
-/* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */
+/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_NID] | ... | FLAGS | */
 #define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
 #define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
 #define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
-
-/*
- * We are going to use the flags for the page to node mapping if its in
- * there. This includes the case where there is no node, so it is implicit.
- */
-#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
-#define NODE_NOT_IN_PAGE_FLAGS
-#endif
+#define LAST_NID_PGOFF		(ZONES_PGOFF - LAST_NID_WIDTH)
 
 /*
  * Define the bit shifts to access each section. For non-existent
@@ -633,6 +595,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 #define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
 #define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
 #define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
+#define LAST_NID_PGSHIFT	(LAST_NID_PGOFF * (LAST_NID_WIDTH != 0))
 
 /* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
 #ifdef NODE_NOT_IN_PAGE_FLAGS
@@ -654,6 +617,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 #define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
 #define NODES_MASK		((1UL << NODES_WIDTH) - 1)
 #define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
+#define LAST_NID_MASK		((1UL << LAST_NID_WIDTH) - 1)
 #define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)
 
 static inline enum zone_type page_zonenum(const struct page *page)
@@ -661,6 +625,10 @@ static inline enum zone_type page_zonenum(const struct page *page)
 	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 }
 
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
+#define SECTION_IN_PAGE_FLAGS
+#endif
+
 /*
  * The identification function is only used by the buddy allocator for
  * determining if two pages could be buddies. We are not really
@@ -693,31 +661,48 @@ static inline int page_to_nid(const struct page *page)
 #endif
 
 #ifdef CONFIG_NUMA_BALANCING
-static inline int page_xchg_last_nid(struct page *page, int nid)
+#ifdef LAST_NID_NOT_IN_PAGE_FLAGS
+static inline int page_nid_xchg_last(struct page *page, int nid)
 {
 	return xchg(&page->_last_nid, nid);
 }
 
-static inline int page_last_nid(struct page *page)
+static inline int page_nid_last(struct page *page)
 {
 	return page->_last_nid;
 }
-static inline void reset_page_last_nid(struct page *page)
+static inline void page_nid_reset_last(struct page *page)
 {
 	page->_last_nid = -1;
 }
 #else
-static inline int page_xchg_last_nid(struct page *page, int nid)
+static inline int page_nid_last(struct page *page)
 {
-	return page_to_nid(page);
+	return (page->flags >> LAST_NID_PGSHIFT) & LAST_NID_MASK;
 }
 
-static inline int page_last_nid(struct page *page)
+extern int page_nid_xchg_last(struct page *page, int nid);
+
+static inline void page_nid_reset_last(struct page *page)
+{
+	int nid = (1 << LAST_NID_SHIFT) - 1;
+
+	page->flags &= ~(LAST_NID_MASK << LAST_NID_PGSHIFT);
+	page->flags |= (nid & LAST_NID_MASK) << LAST_NID_PGSHIFT;
+}
+#endif /* LAST_NID_NOT_IN_PAGE_FLAGS */
+#else
+static inline int page_nid_xchg_last(struct page *page, int nid)
 {
 	return page_to_nid(page);
 }
 
-static inline void reset_page_last_nid(struct page *page)
+static inline int page_nid_last(struct page *page)
+{
+	return page_to_nid(page);
+}
+
+static inline void page_nid_reset_last(struct page *page)
 {
 }
 #endif
@@ -727,7 +712,7 @@ static inline struct zone *page_zone(const struct page *page)
 	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
 }
 
-#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
+#ifdef SECTION_IN_PAGE_FLAGS
 static inline void set_page_section(struct page *page, unsigned long section)
 {
 	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
@@ -757,7 +742,7 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
 {
 	set_page_zone(page, zone);
 	set_page_node(page, node);
-#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
+#ifdef SECTION_IN_PAGE_FLAGS
 	set_page_section(page, pfn_to_section_nr(pfn));
 #endif
 }
@@ -817,18 +802,7 @@ void page_address_init(void);
 #define PAGE_MAPPING_KSM	2
 #define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
 
-extern struct address_space swapper_space;
-static inline struct address_space *page_mapping(struct page *page)
-{
-	struct address_space *mapping = page->mapping;
-
-	VM_BUG_ON(PageSlab(page));
-	if (unlikely(PageSwapCache(page)))
-		mapping = &swapper_space;
-	else if ((unsigned long)mapping & PAGE_MAPPING_ANON)
-		mapping = NULL;
-	return mapping;
-}
+extern struct address_space *page_mapping(struct page *page);
 
 /* Neutral page->mapping pointer to address_space or anon_vma or other */
 static inline void *page_rmapping(struct page *page)
@@ -1035,18 +1009,18 @@ static inline int fixup_user_fault(struct task_struct *tsk,
 }
 #endif
 
-extern int make_pages_present(unsigned long addr, unsigned long end);
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
 		void *buf, int len, int write);
 
-int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, int len, unsigned int foll_flags,
-		     struct page **pages, struct vm_area_struct **vmas,
-		     int *nonblocking);
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-			unsigned long start, int nr_pages, int write, int force,
-			struct page **pages, struct vm_area_struct **vmas);
+long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		      unsigned long start, unsigned long nr_pages,
+		      unsigned int foll_flags, struct page **pages,
+		      struct vm_area_struct **vmas, int *nonblocking);
+long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		    unsigned long start, unsigned long nr_pages,
+		    int write, int force, struct page **pages,
+		    struct vm_area_struct **vmas);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			struct page **pages);
 struct kvec;
@@ -1359,6 +1333,24 @@ extern void free_bootmem_with_active_regions(int nid,
 						unsigned long max_low_pfn);
 extern void sparse_memory_present_with_active_regions(int nid);
 
+#define MOVABLEMEM_MAP_MAX MAX_NUMNODES
+struct movablemem_entry {
+	unsigned long start_pfn;	/* start pfn of memory segment */
+	unsigned long end_pfn;		/* end pfn of memory segment (exclusive) */
+};
+
+struct movablemem_map {
+	bool acpi;	/* true if using SRAT info */
+	int nr_map;
+	struct movablemem_entry map[MOVABLEMEM_MAP_MAX];
+	nodemask_t numa_nodes_hotplug;	/* on which nodes we specify memory */
+	nodemask_t numa_nodes_kernel;	/* on which nodes kernel resides in */
+};
+
+extern void __init insert_movablemem_map(unsigned long start_pfn,
+					 unsigned long end_pfn);
+extern int __init movablemem_map_overlap(unsigned long start_pfn,
+					 unsigned long end_pfn);
 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
 #if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
@@ -1395,6 +1387,9 @@ extern void setup_per_cpu_pageset(void);
 extern void zone_pcp_update(struct zone *zone);
 extern void zone_pcp_reset(struct zone *zone);
 
+/* page_alloc.c */
+extern int min_free_kbytes;
+
 /* nommu.c */
 extern atomic_long_t mmap_pages_allocated;
 extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
@@ -1472,13 +1467,24 @@ extern int install_special_mapping(struct mm_struct *mm,
 extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
 
 extern unsigned long mmap_region(struct file *file, unsigned long addr,
-	unsigned long len, unsigned long flags,
-	vm_flags_t vm_flags, unsigned long pgoff);
-extern unsigned long do_mmap_pgoff(struct file *, unsigned long,
-	unsigned long, unsigned long,
-	unsigned long, unsigned long);
+	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
+extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+	unsigned long len, unsigned long prot, unsigned long flags,
+	unsigned long pgoff, unsigned long *populate);
 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
 
+#ifdef CONFIG_MMU
+extern int __mm_populate(unsigned long addr, unsigned long len,
+			 int ignore_errors);
+static inline void mm_populate(unsigned long addr, unsigned long len)
+{
+	/* Ignore errors */
+	(void) __mm_populate(addr, len, 1);
+}
+#else
+static inline void mm_populate(unsigned long addr, unsigned long len) {}
+#endif
+
 /* These take the mm semaphore themselves */
 extern unsigned long vm_brk(unsigned long, unsigned long);
 extern int vm_munmap(unsigned long, size_t);
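The do_mmap_pgoff() change above splits mapping from population: the mapping call reports, through the new populate out-argument, how many bytes should be prefaulted once mmap_sem is dropped, and mm_populate() then faults them in without holding the lock. A hedged sketch of the resulting calling convention, simplified from what vm_mmap_pgoff() does after this series; mmap_and_populate() is a made-up wrapper name:

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/err.h>

static unsigned long mmap_and_populate(struct file *file, unsigned long addr,
		unsigned long len, unsigned long prot, unsigned long flag,
		unsigned long pgoff)
{
	unsigned long ret, populate;
	struct mm_struct *mm = current->mm;

	down_write(&mm->mmap_sem);
	ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff, &populate);
	up_write(&mm->mmap_sem);
	if (!IS_ERR_VALUE(ret) && populate)
		mm_populate(ret, populate);	/* prefault after the lock is dropped */
	return ret;
}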
@@ -1623,8 +1629,17 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn);
 
-struct page *follow_page(struct vm_area_struct *, unsigned long address,
-			unsigned int foll_flags);
+struct page *follow_page_mask(struct vm_area_struct *vma,
+			      unsigned long address, unsigned int foll_flags,
+			      unsigned int *page_mask);
+
+static inline struct page *follow_page(struct vm_area_struct *vma,
+		unsigned long address, unsigned int foll_flags)
+{
+	unsigned int unused_page_mask;
+	return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
+}
+
 #define FOLL_WRITE	0x01	/* check pte is writable */
 #define FOLL_TOUCH	0x02	/* mark page accessed */
 #define FOLL_GET	0x04	/* do get_page on page */
@@ -1636,6 +1651,7 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
 #define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
 #define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
 #define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
+#define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
 
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
 			void *data);
@@ -1707,7 +1723,11 @@ int vmemmap_populate_basepages(struct page *start_page,
 						unsigned long pages, int node);
 int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
 void vmemmap_populate_print_last(void);
-
+#ifdef CONFIG_MEMORY_HOTPLUG
+void vmemmap_free(struct page *memmap, unsigned long nr_pages);
+#endif
+void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
+				  unsigned long size);
 
 enum mf_flags {
 	MF_COUNT_INCREASED = 1 << 0,
@@ -1720,7 +1740,7 @@ extern int unpoison_memory(unsigned long pfn);
 extern int sysctl_memory_failure_early_kill;
 extern int sysctl_memory_failure_recovery;
 extern void shake_page(struct page *p, int access);
-extern atomic_long_t mce_bad_pages;
+extern atomic_long_t num_poisoned_pages;
 extern int soft_offline_page(struct page *page, int flags);
 
 extern void dump_page(struct page *page);
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -12,6 +12,7 @@
 #include <linux/cpumask.h>
 #include <linux/page-debug-flags.h>
 #include <linux/uprobes.h>
+#include <linux/page-flags-layout.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
 
@@ -173,7 +174,7 @@ struct page {
 	void *shadow;
 #endif
 
-#ifdef CONFIG_NUMA_BALANCING
+#ifdef LAST_NID_NOT_IN_PAGE_FLAGS
 	int _last_nid;
 #endif
 }
@@ -414,9 +415,9 @@ struct mm_struct {
 #endif
 #ifdef CONFIG_NUMA_BALANCING
 	/*
-	 * numa_next_scan is the next time when the PTEs will me marked
-	 * pte_numa to gather statistics and migrate pages to new nodes
-	 * if necessary
+	 * numa_next_scan is the next time that the PTEs will be marked
+	 * pte_numa. NUMA hinting faults will gather statistics and migrate
+	 * pages to new nodes if necessary.
 	 */
 	unsigned long numa_next_scan;
 
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -79,6 +79,8 @@ calc_vm_flag_bits(unsigned long flags)
 {
 	return _calc_vm_trans(flags, MAP_GROWSDOWN,  VM_GROWSDOWN ) |
 	       _calc_vm_trans(flags, MAP_DENYWRITE,  VM_DENYWRITE ) |
-	       _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED    );
+	       ((flags & MAP_LOCKED) ? (VM_LOCKED | VM_POPULATE) : 0) |
+	       (((flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE) ?
+			VM_POPULATE : 0);
 }
 #endif /* _LINUX_MMAN_H */
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -15,7 +15,7 @@
 #include <linux/seqlock.h>
 #include <linux/nodemask.h>
 #include <linux/pageblock-flags.h>
-#include <generated/bounds.h>
+#include <linux/page-flags-layout.h>
 #include <linux/atomic.h>
 #include <asm/page.h>
 
@@ -57,7 +57,9 @@ enum {
 	 */
 	MIGRATE_CMA,
 #endif
+#ifdef CONFIG_MEMORY_ISOLATION
 	MIGRATE_ISOLATE,	/* can't allocate from here */
+#endif
 	MIGRATE_TYPES
 };
 
@@ -308,24 +310,6 @@
 
 #ifndef __GENERATING_BOUNDS_H
 
-/*
- * When a memory allocation must conform to specific limitations (such
- * as being suitable for DMA) the caller will pass in hints to the
- * allocator in the gfp_mask, in the zone modifier bits.  These bits
- * are used to select a priority ordered list of memory zones which
- * match the requested limits. See gfp_zone() in include/linux/gfp.h
- */
-
-#if MAX_NR_ZONES < 2
-#define ZONES_SHIFT 0
-#elif MAX_NR_ZONES <= 2
-#define ZONES_SHIFT 1
-#elif MAX_NR_ZONES <= 4
-#define ZONES_SHIFT 2
-#else
-#error ZONES_SHIFT -- too many zones configured adjust calculation
-#endif
-
 struct zone {
 	/* Fields commonly accessed by the page allocator */
 
@@ -543,6 +527,26 @@ static inline int zone_is_oom_locked(const struct zone *zone)
 	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
 }
 
+static inline unsigned zone_end_pfn(const struct zone *zone)
+{
+	return zone->zone_start_pfn + zone->spanned_pages;
+}
+
+static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
+{
+	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
+}
+
+static inline bool zone_is_initialized(struct zone *zone)
+{
+	return !!zone->wait_table;
+}
+
+static inline bool zone_is_empty(struct zone *zone)
+{
+	return zone->spanned_pages == 0;
+}
+
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
@@ -752,11 +756,17 @@ typedef struct pglist_data {
 #define nid_page_nr(nid, pagenr) 	pgdat_page_nr(NODE_DATA(nid),(pagenr))
 
 #define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
+#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))
 
-#define node_end_pfn(nid) ({\
-	pg_data_t *__pgdat = NODE_DATA(nid);\
-	__pgdat->node_start_pfn + __pgdat->node_spanned_pages;\
-})
+static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
+{
+	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
+}
+
+static inline bool pgdat_is_empty(pg_data_t *pgdat)
+{
+	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
+}
 
 #include <linux/memory_hotplug.h>
 
@@ -1053,8 +1063,6 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn)
  * PA_SECTION_SHIFT		physical address to/from section number
  * PFN_SECTION_SHIFT		pfn to/from section number
  */
-#define SECTIONS_SHIFT	(MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)
-
 #define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
 #define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)
 
--- /dev/null
+++ b/include/linux/page-flags-layout.h
@@ -0,0 +1,88 @@
+#ifndef PAGE_FLAGS_LAYOUT_H
+#define PAGE_FLAGS_LAYOUT_H
+
+#include <linux/numa.h>
+#include <generated/bounds.h>
+
+/*
+ * When a memory allocation must conform to specific limitations (such
+ * as being suitable for DMA) the caller will pass in hints to the
+ * allocator in the gfp_mask, in the zone modifier bits.  These bits
+ * are used to select a priority ordered list of memory zones which
+ * match the requested limits. See gfp_zone() in include/linux/gfp.h
+ */
+#if MAX_NR_ZONES < 2
+#define ZONES_SHIFT 0
+#elif MAX_NR_ZONES <= 2
+#define ZONES_SHIFT 1
+#elif MAX_NR_ZONES <= 4
+#define ZONES_SHIFT 2
+#else
+#error ZONES_SHIFT -- too many zones configured adjust calculation
+#endif
+
+#ifdef CONFIG_SPARSEMEM
+#include <asm/sparsemem.h>
+
+/* SECTION_SHIFT	#bits space required to store a section # */
+#define SECTIONS_SHIFT	(MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)
+
+#endif /* CONFIG_SPARSEMEM */
+
+/*
+ * page->flags layout:
+ *
+ * There are five possibilities for how page->flags get laid out.  The first
+ * pair is for the normal case without sparsemem. The second pair is for
+ * sparsemem when there is plenty of space for node and section information.
+ * The last is when there is insufficient space in page->flags and a separate
+ * lookup is necessary.
+ *
+ * No sparsemem or sparsemem vmemmap: |       NODE     | ZONE |          ... | FLAGS |
+ *         " plus space for last_nid: |       NODE     | ZONE | LAST_NID ... | FLAGS |
+ * classic sparse with space for node:| SECTION | NODE | ZONE |          ... | FLAGS |
+ *         " plus space for last_nid: | SECTION | NODE | ZONE | LAST_NID ... | FLAGS |
+ * classic sparse no space for node:  | SECTION |     ZONE    | ... | FLAGS |
+ */
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
+#define SECTIONS_WIDTH		SECTIONS_SHIFT
+#else
+#define SECTIONS_WIDTH		0
+#endif
+
+#define ZONES_WIDTH		ZONES_SHIFT
+
+#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
+#define NODES_WIDTH		NODES_SHIFT
+#else
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+#error "Vmemmap: No space for nodes field in page flags"
+#endif
+#define NODES_WIDTH		0
+#endif
+
+#ifdef CONFIG_NUMA_BALANCING
+#define LAST_NID_SHIFT NODES_SHIFT
+#else
+#define LAST_NID_SHIFT 0
+#endif
+
+#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_NID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
+#define LAST_NID_WIDTH LAST_NID_SHIFT
+#else
+#define LAST_NID_WIDTH 0
+#endif
+
+/*
+ * We are going to use the flags for the page to node mapping if its in
+ * there.  This includes the case where there is no node, so it is implicit.
+ */
+#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
+#define NODE_NOT_IN_PAGE_FLAGS
+#endif
+
+#if defined(CONFIG_NUMA_BALANCING) && LAST_NID_WIDTH == 0
+#define LAST_NID_NOT_IN_PAGE_FLAGS
+#endif
+
+#endif /* _LINUX_PAGE_FLAGS_LAYOUT */
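Since the new header is all preprocessor arithmetic, a worked example helps. For a hypothetical 64-bit CONFIG_SPARSEMEM_VMEMMAP build with NR_PAGEFLAGS == 22, MAX_NR_ZONES == 4 and NODES_SHIFT == 6 (assumed values, not taken from the patch): SECTIONS_WIDTH = 0 and ZONES_WIDTH = 2; because 0 + 2 + 6 <= 64 - 22 = 42, NODES_WIDTH = 6, and because 0 + 2 + 6 + 6 <= 42, LAST_NID_WIDTH = 6 as well, giving | NODE(6) | ZONE(2) | LAST_NID(6) | ... | FLAGS(22) | with 28 bits to spare. A hedged compile-time restatement of the header's own constraint:

#include <linux/page-flags-layout.h>
#include <linux/bitops.h>	/* BITS_PER_LONG */
#include <linux/bug.h>		/* BUILD_BUG_ON */

static inline void page_flags_layout_check(void)
{
	/* all width fields together must leave room for the flag bits */
	BUILD_BUG_ON(SECTIONS_WIDTH + ZONES_WIDTH + NODES_WIDTH +
		     LAST_NID_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS);
}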
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -1,6 +1,25 @@
 #ifndef __LINUX_PAGEISOLATION_H
 #define __LINUX_PAGEISOLATION_H
 
+#ifdef CONFIG_MEMORY_ISOLATION
+static inline bool is_migrate_isolate_page(struct page *page)
+{
+	return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
+}
+static inline bool is_migrate_isolate(int migratetype)
+{
+	return migratetype == MIGRATE_ISOLATE;
+}
+#else
+static inline bool is_migrate_isolate_page(struct page *page)
+{
+	return false;
+}
+static inline bool is_migrate_isolate(int migratetype)
+{
+	return false;
+}
+#endif
 
 bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 			 bool skip_hwpoisoned_pages);
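The point of the new predicates is that callers no longer need their own #ifdef CONFIG_MEMORY_ISOLATION around MIGRATE_ISOLATE comparisons: the stubs compile to false when isolation is off and the compiler discards the dead branch. A minimal hedged sketch; can_steal_freepages() is an invented caller, not from this diff:

#include <linux/types.h>
#include <linux/page-isolation.h>

static bool can_steal_freepages(int migratetype)
{
	/* constant-folds to "true" when CONFIG_MEMORY_ISOLATION is off */
	return !is_migrate_isolate(migratetype);
}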
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -537,6 +537,7 @@ struct dev_pm_info {
 	unsigned int		irq_safe:1;
 	unsigned int		use_autosuspend:1;
 	unsigned int		timer_autosuspends:1;
+	unsigned int		memalloc_noio:1;
 	enum rpm_request	request;
 	enum rpm_status		runtime_status;
 	int			runtime_error;
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -47,6 +47,7 @@ extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
 extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
 extern void pm_runtime_update_max_time_suspended(struct device *dev,
 						 s64 delta_ns);
+extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
 
 static inline bool pm_children_suspended(struct device *dev)
 {
@@ -156,6 +157,8 @@ static inline void pm_runtime_set_autosuspend_delay(struct device *dev,
 						int delay) {}
 static inline unsigned long pm_runtime_autosuspend_expiration(
 						struct device *dev) { return 0; }
+static inline void pm_runtime_set_memalloc_noio(struct device *dev,
+						bool enable){}
 
 #endif /* !CONFIG_PM_RUNTIME */
 
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -123,7 +123,7 @@ static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
 	down_write(&anon_vma->root->rwsem);
 }
 
-static inline void anon_vma_unlock(struct anon_vma *anon_vma)
+static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
 {
 	up_write(&anon_vma->root->rwsem);
 }
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -51,6 +51,7 @@ struct sched_param {
 #include <linux/cred.h>
 #include <linux/llist.h>
 #include <linux/uidgid.h>
+#include <linux/gfp.h>
 
 #include <asm/processor.h>
 
@@ -1791,6 +1792,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
 #define PF_KSWAPD	0x00040000	/* I am kswapd */
+#define PF_MEMALLOC_NOIO 0x00080000	/* Allocating memory without IO involved */
 #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
 #define PF_KTHREAD	0x00200000	/* I am a kernel thread */
 #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
@@ -1828,6 +1830,26 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
 #define used_math() tsk_used_math(current)
 
+/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */
+static inline gfp_t memalloc_noio_flags(gfp_t flags)
+{
+	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
+		flags &= ~__GFP_IO;
+	return flags;
+}
+
+static inline unsigned int memalloc_noio_save(void)
+{
+	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
+	current->flags |= PF_MEMALLOC_NOIO;
+	return flags;
+}
+
+static inline void memalloc_noio_restore(unsigned int flags)
+{
+	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
+}
+
 /*
  * task->jobctl flags
  */
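A hedged usage sketch for the new helpers (not part of the patch): a runtime-resume path brackets its allocations with save/restore, and while PF_MEMALLOC_NOIO is set the allocator is taught elsewhere in this series to strip __GFP_IO via memalloc_noio_flags(), so a GFP_KERNEL request cannot recurse into block I/O on the device being resumed. The function name and buffer size are made up for illustration:

#include <linux/sched.h>
#include <linux/slab.h>

static void *resume_alloc_example(void)
{
	unsigned int noio_flags;
	void *buf;

	noio_flags = memalloc_noio_save();	/* sets PF_MEMALLOC_NOIO */
	/*
	 * With the flag set, this GFP_KERNEL request effectively behaves
	 * like GFP_NOIO, so it cannot deadlock against the suspended device.
	 */
	buf = kmalloc(4096, GFP_KERNEL);
	memalloc_noio_restore(noio_flags);	/* restores the saved state */
	return buf;
}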
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -8,7 +8,7 @@
 #include <linux/memcontrol.h>
 #include <linux/sched.h>
 #include <linux/node.h>
-
+#include <linux/fs.h>
 #include <linux/atomic.h>
 #include <asm/page.h>
 
@@ -156,7 +156,7 @@ enum {
 	SWP_SCANNING	= (1 << 8),	/* refcount in scan_swap_map */
 };
 
-#define SWAP_CLUSTER_MAX 32
+#define SWAP_CLUSTER_MAX 32UL
 #define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
 
 /*
@@ -202,6 +202,18 @@ struct swap_info_struct {
 	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
 	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
 #endif
+	spinlock_t lock;		/*
+					 * protect map scan related fields like
+					 * swap_map, lowest_bit, highest_bit,
+					 * inuse_pages, cluster_next,
+					 * cluster_nr, lowest_alloc and
+					 * highest_alloc. other fields are only
+					 * changed at swapon/swapoff, so are
+					 * protected by swap_lock. changing
+					 * flags need hold this lock and
+					 * swap_lock. If both locks need hold,
+					 * hold swap_lock first.
+					 */
 };
 
 struct swap_list_t {
@@ -209,15 +221,12 @@ struct swap_list_t {
 	int next;	/* swapfile to be used next */
 };
 
-/* Swap 50% full? Release swapcache more aggressively.. */
-#define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)
-
 /* linux/mm/page_alloc.c */
 extern unsigned long totalram_pages;
 extern unsigned long totalreserve_pages;
 extern unsigned long dirty_balance_reserve;
-extern unsigned int nr_free_buffer_pages(void);
-extern unsigned int nr_free_pagecache_pages(void);
+extern unsigned long nr_free_buffer_pages(void);
+extern unsigned long nr_free_pagecache_pages(void);
 
 /* Definition of global_page_state not available yet */
 #define nr_free_pages() global_page_state(NR_FREE_PAGES)
@@ -266,7 +275,7 @@ extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 extern unsigned long shrink_all_memory(unsigned long nr_pages);
 extern int vm_swappiness;
 extern int remove_mapping(struct address_space *mapping, struct page *page);
-extern long vm_total_pages;
+extern unsigned long vm_total_pages;
 
 #ifdef CONFIG_NUMA
 extern int zone_reclaim_mode;
@@ -330,8 +339,9 @@ int generic_swapfile_activate(struct swap_info_struct *, struct file *,
 		sector_t *);
 
 /* linux/mm/swap_state.c */
-extern struct address_space swapper_space;
-#define total_swapcache_pages	swapper_space.nrpages
+extern struct address_space swapper_spaces[];
+#define swap_address_space(entry) (&swapper_spaces[swp_type(entry)])
+extern unsigned long total_swapcache_pages(void);
 extern void show_swap_cache_info(void);
 extern int add_to_swap(struct page *);
 extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
@@ -346,8 +356,20 @@ extern struct page *swapin_readahead(swp_entry_t, gfp_t,
 			struct vm_area_struct *vma, unsigned long addr);
 
 /* linux/mm/swapfile.c */
-extern long nr_swap_pages;
+extern atomic_long_t nr_swap_pages;
 extern long total_swap_pages;
 
+/* Swap 50% full? Release swapcache more aggressively.. */
+static inline bool vm_swap_full(void)
+{
+	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
+}
+
+static inline long get_nr_swap_pages(void)
+{
+	return atomic_long_read(&nr_swap_pages);
+}
+
 extern void si_swapinfo(struct sysinfo *);
 extern swp_entry_t get_swap_page(void);
 extern swp_entry_t get_swap_page_of_type(int);
@@ -380,9 +402,10 @@ mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
 
 #else /* CONFIG_SWAP */
 
-#define nr_swap_pages				0L
+#define get_nr_swap_pages()			0L
 #define total_swap_pages			0L
-#define total_swapcache_pages			0UL
+#define total_swapcache_pages()			0UL
 #define vm_swap_full()				0
 
 #define si_swapinfo(val) \
 	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -36,7 +36,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 #endif
 		PGINODESTEAL, SLABS_SCANNED, KSWAPD_INODESTEAL,
 		KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
-		KSWAPD_SKIP_CONGESTION_WAIT,
 		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
 #ifdef CONFIG_NUMA_BALANCING
 		NUMA_PTE_UPDATES,
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -85,7 +85,7 @@ static inline void vm_events_fold_cpu(int cpu)
 #define count_vm_numa_events(x, y) count_vm_events(x, y)
 #else
 #define count_vm_numa_event(x) do {} while (0)
-#define count_vm_numa_events(x, y) do {} while (0)
+#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
 #endif /* CONFIG_NUMA_BALANCING */
 
 #define __count_zone_vm_events(item, zone, delta) \
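The count_vm_numa_events() tweak exists purely to keep the !CONFIG_NUMA_BALANCING stub from triggering unused-variable warnings: (void)(y) consumes the argument without generating any code. A tiny userspace illustration of the difference (ordinary C, not kernel code; the _old/_new suffixes are added here for comparison only):

#define count_vm_numa_events_old(x, y) do {} while (0)
#define count_vm_numa_events_new(x, y) do { (void)(y); } while (0)

int main(void)
{
	int pages = 128;	/* set, but used only as a macro argument */

	/* with the _old stub, -Wunused-but-set-variable would fire on pages */
	count_vm_numa_events_new(0, pages);	/* _new: argument consumed, no warning */
	return 0;
}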