Merge branch 'akpm' (patches from Andrew)
Merge fixes from Andrew Morton:
 "27 fixes.

  There are three patches that aren't actually fixes. They're simple
  function renamings which are nice-to-have in mainline as ongoing net
  development depends on them."

* akpm: (27 commits)
  timerfd: export defines to userspace
  mm/hugetlb.c: fix reservation race when freeing surplus pages
  mm/slab.c: fix SLAB freelist randomization duplicate entries
  zram: support BDI_CAP_STABLE_WRITES
  zram: revalidate disk under init_lock
  mm: support anonymous stable page
  mm: add documentation for page fragment APIs
  mm: rename __page_frag functions to __page_frag_cache, drop order from drain
  mm: rename __alloc_page_frag to page_frag_alloc and __free_page_frag to page_frag_free
  mm, memcg: fix the active list aging for lowmem requests when memcg is enabled
  mm: don't dereference struct page fields of invalid pages
  mailmap: add codeaurora.org names for nameless email commits
  signal: protect SIGNAL_UNKILLABLE from unintentional clearing.
  mm: pmd dirty emulation in page fault handler
  ipc/sem.c: fix incorrect sem_lock pairing
  lib/Kconfig.debug: fix frv build failure
  mm: get rid of __GFP_OTHER_NODE
  mm: fix remote numa hits statistics
  mm: fix devm_memremap_pages crash, use mem_hotplug_{begin, done}
  ocfs2: fix crash caused by stale lvb with fsdlm plugin
  ...
include/linux/gfp.h
@@ -38,9 +38,8 @@ struct vm_area_struct;
 #define ___GFP_ACCOUNT		0x100000u
 #define ___GFP_NOTRACK		0x200000u
 #define ___GFP_DIRECT_RECLAIM	0x400000u
-#define ___GFP_OTHER_NODE	0x800000u
-#define ___GFP_WRITE		0x1000000u
-#define ___GFP_KSWAPD_RECLAIM	0x2000000u
+#define ___GFP_WRITE		0x800000u
+#define ___GFP_KSWAPD_RECLAIM	0x1000000u
 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
 
 /*
@@ -172,11 +171,6 @@ struct vm_area_struct;
  * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of
  * distinguishing in the source between false positives and allocations that
  * cannot be supported (e.g. page tables).
- *
- * __GFP_OTHER_NODE is for allocations that are on a remote node but that
- * should not be accounted for as a remote allocation in vmstat. A
- * typical user would be khugepaged collapsing a huge page on a remote
- * node.
  */
 #define __GFP_COLD	((__force gfp_t)___GFP_COLD)
 #define __GFP_NOWARN	((__force gfp_t)___GFP_NOWARN)
@@ -184,10 +178,9 @@ struct vm_area_struct;
 #define __GFP_ZERO	((__force gfp_t)___GFP_ZERO)
 #define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)
 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
-#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)
 
 /* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT 26
+#define __GFP_BITS_SHIFT 25
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /*
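Why the shift drops from 26 to 25: with ___GFP_OTHER_NODE gone, the highest
remaining flag is ___GFP_KSWAPD_RECLAIM at 0x1000000u (1 << 24), so 25 bits
now cover every flag. This is exactly the update the comment above warns
about. A hypothetical compile-time check (not part of this patch) states the
invariant directly:

    /* Hypothetical sanity check, illustration only: every ___GFP_*
     * bit must lie below __GFP_BITS_SHIFT. */
    BUILD_BUG_ON(___GFP_KSWAPD_RECLAIM >= (1u << __GFP_BITS_SHIFT));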
@@ -506,11 +499,10 @@ extern void free_hot_cold_page(struct page *page, bool cold);
 extern void free_hot_cold_page_list(struct list_head *list, bool cold);
 
 struct page_frag_cache;
-extern void __page_frag_drain(struct page *page, unsigned int order,
-			      unsigned int count);
-extern void *__alloc_page_frag(struct page_frag_cache *nc,
-			       unsigned int fragsz, gfp_t gfp_mask);
-extern void __free_page_frag(void *addr);
+extern void __page_frag_cache_drain(struct page *page, unsigned int count);
+extern void *page_frag_alloc(struct page_frag_cache *nc,
+			     unsigned int fragsz, gfp_t gfp_mask);
+extern void page_frag_free(void *addr);
 
 #define __free_page(page) __free_pages((page), 0)
 #define free_page(addr) free_pages((addr), 0)
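The renames above (from the page_frag commits in this merge) give the
fragment allocator a consistent page_frag_* namespace. A minimal usage
sketch; the cache must start zeroed, and the fragment size and GFP_ATOMIC
are illustrative choices:

    struct page_frag_cache nc = {};
    void *buf;

    buf = page_frag_alloc(&nc, 2048, GFP_ATOMIC);  /* was __alloc_page_frag() */
    if (buf) {
            /* ... fill the 2 KiB fragment and hand it off ... */
            page_frag_free(buf);                   /* was __free_page_frag() */
    }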
include/linux/memcontrol.h
@@ -120,7 +120,7 @@ struct mem_cgroup_reclaim_iter {
  */
 struct mem_cgroup_per_node {
	struct lruvec		lruvec;
-	unsigned long		lru_size[NR_LRU_LISTS];
+	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
 
	struct mem_cgroup_reclaim_iter	iter[DEF_PRIORITY + 1];
@@ -432,7 +432,7 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
 
 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
-		int nr_pages);
+		int zid, int nr_pages);
 
 unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, unsigned int lru_mask);
@@ -441,9 +441,23 @@ static inline
 unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 {
	struct mem_cgroup_per_node *mz;
+	unsigned long nr_pages = 0;
+	int zid;
 
	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
-	return mz->lru_size[lru];
+	for (zid = 0; zid < MAX_NR_ZONES; zid++)
+		nr_pages += mz->lru_zone_size[zid][lru];
+	return nr_pages;
+}
+
+static inline
+unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
+		enum lru_list lru, int zone_idx)
+{
+	struct mem_cgroup_per_node *mz;
+
+	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+	return mz->lru_zone_size[zone_idx][lru];
 }
 
 void mem_cgroup_handle_over_high(void);
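Splitting lru_size into per-zone lru_zone_size is what lets reclaim reason
about zones that are actually eligible for a lowmem request under memcg. A
hedged sketch of the kind of caller the new helper enables (the name
lru_pages_eligible is hypothetical; the real consumers live in mm/vmscan.c):

    static unsigned long lru_pages_eligible(struct lruvec *lruvec,
                                            enum lru_list lru,
                                            int max_zone_idx)
    {
            unsigned long total = 0;
            int zid;

            /* Count only pages in zones usable for this allocation. */
            for (zid = 0; zid <= max_zone_idx; zid++)
                    total += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
            return total;
    }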
@@ -671,6 +685,12 @@ mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 {
	return 0;
 }
+static inline
+unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
+		enum lru_list lru, int zone_idx)
+{
+	return 0;
+}
 
 static inline unsigned long
 mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
include/linux/mm.h
@@ -1210,8 +1210,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
		struct vm_area_struct *vma);
 void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
-int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp,
-	       spinlock_t **ptlp);
+int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+		   pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn);
 int follow_phys(struct vm_area_struct *vma, unsigned long address,
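follow_pte() grows into follow_pte_pmd() so a caller (DAX, for the pmd dirty
emulation fix in this merge) can learn whether an address is mapped by a PTE
or by a huge PMD. A hedged calling sketch, assuming the usual contract that
on success one of the two out-pointers is set and the returned lock is held:

    pte_t *ptep = NULL;
    pmd_t *pmdp = NULL;
    spinlock_t *ptl;

    if (!follow_pte_pmd(mm, address, &ptep, &pmdp, &ptl)) {
            if (pmdp) {
                    /* address is covered by a huge (PMD-sized) entry */
                    spin_unlock(ptl);
            } else {
                    /* ordinary PTE mapping */
                    pte_unmap_unlock(ptep, ptl);
            }
    }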
include/linux/mm_inline.h
@@ -39,7 +39,7 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
 {
	__update_lru_size(lruvec, lru, zid, nr_pages);
 #ifdef CONFIG_MEMCG
-	mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
+	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
 #endif
 }
 
include/linux/sched.h
@@ -854,6 +854,16 @@ struct signal_struct {
 
 #define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */
 
+#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
+			  SIGNAL_STOP_CONTINUED)
+
+static inline void signal_set_stop_flags(struct signal_struct *sig,
+					 unsigned int flags)
+{
+	WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
+	sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
+}
+
 /* If true, all threads except ->group_exit_task have pending SIGKILL */
 static inline int signal_group_exit(const struct signal_struct *sig)
 {
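The point of the helper: callers used to assign sig->flags wholesale, which
also wiped SIGNAL_UNKILLABLE from init and made it killable. A hedged
before/after sketch of the call-site pattern this fixes (the real caller is
prepare_signal() in kernel/signal.c, outside this excerpt):

    /* Before: overwrites every flag, including SIGNAL_UNKILLABLE. */
    sig->flags = why | SIGNAL_STOP_CONTINUED;

    /* After: only the SIGNAL_STOP_MASK bits change. */
    signal_set_stop_flags(sig, why | SIGNAL_STOP_CONTINUED);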
include/linux/skbuff.h
@@ -2480,7 +2480,7 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
 
 static inline void skb_free_frag(void *addr)
 {
-	__free_page_frag(addr);
+	page_frag_free(addr);
 }
 
 void *napi_alloc_frag(unsigned int fragsz);
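skb_free_frag() is the usual consumer-side pairing for the fragment API. A
one-line sketch of the round trip (the fragment size is illustrative):

    void *frag = napi_alloc_frag(256);

    if (frag)
            skb_free_frag(frag);  /* now ends up in page_frag_free() */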
include/linux/slab.h
@@ -226,7 +226,7 @@ static inline const char *__check_heap_object(const void *ptr,
  * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
  */
 #define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
-#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
+#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW	3
 #endif
@@ -239,7 +239,7 @@ static inline const char *__check_heap_object(const void *ptr,
  * be allocated from the same page.
  */
 #define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
-#define KMALLOC_SHIFT_MAX	30
+#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW	3
 #endif
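The "- 1" matters because the largest block the buddy allocator hands out is
order MAX_ORDER - 1, not MAX_ORDER. Worked through with typical x86-64
values (assumed here purely for illustration):

    MAX_ORDER = 11, PAGE_SHIFT = 12 (4 KiB pages)
    largest buddy block   = 2^(MAX_ORDER - 1) pages = 2^10 * 4 KiB = 4 MiB
    old KMALLOC_SHIFT_MAX = 11 + 12     = 23  ->  8 MiB (unsatisfiable)
    new KMALLOC_SHIFT_MAX = 11 + 12 - 1 = 22  ->  4 MiB (matches the allocator)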
include/linux/swap.h
@@ -150,8 +150,9 @@ enum {
	SWP_FILE	= (1 << 7),	/* set after swap_activate success */
	SWP_AREA_DISCARD = (1 << 8),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 9),	/* freed swap page-cluster discards */
+	SWP_STABLE_WRITES = (1 << 10),	/* no overwrite PG_writeback pages */
					/* add others here before... */
-	SWP_SCANNING	= (1 << 10),	/* refcount in scan_swap_map */
+	SWP_SCANNING	= (1 << 11),	/* refcount in scan_swap_map */
 };
 
 #define SWAP_CLUSTER_MAX 32UL
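SWP_STABLE_WRITES records at swapon time that the backing device must not
see a page change while it is under writeback; the zram stable-page commits
in this merge are the motivating user. A hedged sketch of how the bit would
get set, modeled on the mm side of the series (not the literal hunk):

    /* Illustrative: mark the swap device if its BDI demands stable pages. */
    if (bdi_cap_stable_pages_required(inode_to_bdi(inode)))
            p->flags |= SWP_STABLE_WRITES;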
include/linux/timerfd.h
@@ -8,23 +8,7 @@
 #ifndef _LINUX_TIMERFD_H
 #define _LINUX_TIMERFD_H
 
-/* For O_CLOEXEC and O_NONBLOCK */
-#include <linux/fcntl.h>
-
-/* For _IO helpers */
-#include <linux/ioctl.h>
-
-/*
- * CAREFUL: Check include/asm-generic/fcntl.h when defining
- * new flags, since they might collide with O_* ones. We want
- * to re-use O_* flags that couldn't possibly have a meaning
- * from eventfd, in order to leave a free define-space for
- * shared O_* flags.
- */
-#define TFD_TIMER_ABSTIME (1 << 0)
-#define TFD_TIMER_CANCEL_ON_SET (1 << 1)
-#define TFD_CLOEXEC O_CLOEXEC
-#define TFD_NONBLOCK O_NONBLOCK
+#include <uapi/linux/timerfd.h>
 
 #define TFD_SHARED_FCNTL_FLAGS (TFD_CLOEXEC | TFD_NONBLOCK)
 /* Flags for timerfd_create. */
@@ -32,6 +16,4 @@
 /* Flags for timerfd_settime. */
 #define TFD_SETTIME_FLAGS (TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET)
 
-#define TFD_IOC_SET_TICKS	_IOW('T', 0, u64)
-
 #endif /* _LINUX_TIMERFD_H */
include/trace/events/mmflags.h
@@ -47,8 +47,7 @@
	{(unsigned long)__GFP_WRITE,		"__GFP_WRITE"},		\
	{(unsigned long)__GFP_RECLAIM,		"__GFP_RECLAIM"},	\
	{(unsigned long)__GFP_DIRECT_RECLAIM,	"__GFP_DIRECT_RECLAIM"},\
-	{(unsigned long)__GFP_KSWAPD_RECLAIM,	"__GFP_KSWAPD_RECLAIM"},\
-	{(unsigned long)__GFP_OTHER_NODE,	"__GFP_OTHER_NODE"}	\
+	{(unsigned long)__GFP_KSWAPD_RECLAIM,	"__GFP_KSWAPD_RECLAIM"}\
 
 #define show_gfp_flags(flags)						\
	(flags) ? __print_flags(flags, "|",				\
include/uapi/linux/Kbuild
@@ -414,6 +414,7 @@ header-y += telephony.h
 header-y += termios.h
 header-y += thermal.h
 header-y += time.h
+header-y += timerfd.h
 header-y += times.h
 header-y += timex.h
 header-y += tiocl.h
include/uapi/linux/timerfd.h (new file, 36 lines)
@@ -0,0 +1,36 @@
+/*
+ *  include/linux/timerfd.h
+ *
+ *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+
+#ifndef _UAPI_LINUX_TIMERFD_H
+#define _UAPI_LINUX_TIMERFD_H
+
+#include <linux/types.h>
+
+/* For O_CLOEXEC and O_NONBLOCK */
+#include <linux/fcntl.h>
+
+/* For _IO helpers */
+#include <linux/ioctl.h>
+
+/*
+ * CAREFUL: Check include/asm-generic/fcntl.h when defining
+ * new flags, since they might collide with O_* ones. We want
+ * to re-use O_* flags that couldn't possibly have a meaning
+ * from eventfd, in order to leave a free define-space for
+ * shared O_* flags.
+ *
+ * Also make sure to update the masks in include/linux/timerfd.h
+ * when adding new flags.
+ */
+#define TFD_TIMER_ABSTIME (1 << 0)
+#define TFD_TIMER_CANCEL_ON_SET (1 << 1)
+#define TFD_CLOEXEC O_CLOEXEC
+#define TFD_NONBLOCK O_NONBLOCK
+
+#define TFD_IOC_SET_TICKS	_IOW('T', 0, __u64)
+
+#endif /* _UAPI_LINUX_TIMERFD_H */
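Exporting the TFD_* defines is what makes this header usable directly from
userspace. A small self-contained demonstration program; it compiles against
glibc's <sys/timerfd.h>, which carries the same flag values:

    #include <stdio.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <time.h>
    #include <sys/timerfd.h>

    int main(void)
    {
            int fd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC | TFD_NONBLOCK);
            struct itimerspec its = {
                    .it_value    = { .tv_sec = 1 },  /* first expiry after 1 s */
                    .it_interval = { .tv_sec = 1 },  /* then every 1 s */
            };
            uint64_t expirations;

            if (fd < 0 || timerfd_settime(fd, 0, &its, NULL) < 0)
                    return 1;
            sleep(2);  /* let the timer fire; the fd is non-blocking */
            if (read(fd, &expirations, sizeof(expirations)) == (ssize_t)sizeof(expirations))
                    printf("timer expired %llu time(s)\n",
                           (unsigned long long)expirations);
            close(fd);
            return 0;
    }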