Merge 4.5-rc4 into usb-next

We want the USB fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman
2016-02-14 14:38:30 -08:00
735 changed files with 6611 additions and 56920 deletions


@@ -682,9 +682,12 @@ static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
/*
* q->prep_rq_fn return values
*/
-#define BLKPREP_OK 0 /* serve it */
-#define BLKPREP_KILL 1 /* fatal error, kill */
-#define BLKPREP_DEFER 2 /* leave on queue */
+enum {
+	BLKPREP_OK, /* serve it */
+	BLKPREP_KILL, /* fatal error, kill, return -EIO */
+	BLKPREP_DEFER, /* leave on queue */
+	BLKPREP_INVALID, /* invalid command, kill, return -EREMOTEIO */
+};
extern unsigned long blk_max_low_pfn, blk_max_pfn;
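The new BLKPREP_INVALID value lets a prep function distinguish "this command is invalid for the device" (completed with -EREMOTEIO) from a fatal error (BLKPREP_KILL, -EIO). A minimal sketch of a prep_rq_fn using it; the driver structure and field names are invented for illustration:

#include <linux/blkdev.h>

struct example_dev {			/* hypothetical driver state */
	bool read_only;
	bool resources_available;
};

/* Illustrative prep callback: reject unsupported commands with
 * BLKPREP_INVALID (-EREMOTEIO), defer when resources are busy. */
static int example_prep_rq_fn(struct request_queue *q, struct request *rq)
{
	struct example_dev *dev = q->queuedata;

	if (rq_data_dir(rq) == WRITE && dev->read_only)
		return BLKPREP_INVALID;		/* invalid command, -EREMOTEIO */
	if (!dev->resources_available)
		return BLKPREP_DEFER;		/* leave on queue, retry later */
	return BLKPREP_OK;			/* serve it */
}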


@@ -63,6 +63,18 @@
#define CEPH_FEATURE_OSD_MIN_SIZE_RECOVERY (1ULL<<49)
// duplicated since it was introduced at the same time as MIN_SIZE_RECOVERY
#define CEPH_FEATURE_OSD_PROXY_FEATURES (1ULL<<49) /* overlap w/ above */
#define CEPH_FEATURE_MON_METADATA (1ULL<<50)
#define CEPH_FEATURE_OSD_BITWISE_HOBJ_SORT (1ULL<<51) /* can sort objs bitwise */
#define CEPH_FEATURE_OSD_PROXY_WRITE_FEATURES (1ULL<<52)
#define CEPH_FEATURE_ERASURE_CODE_PLUGINS_V3 (1ULL<<53)
#define CEPH_FEATURE_OSD_HITSET_GMT (1ULL<<54)
#define CEPH_FEATURE_HAMMER_0_94_4 (1ULL<<55)
#define CEPH_FEATURE_NEW_OSDOP_ENCODING (1ULL<<56) /* New, v7 encoding */
#define CEPH_FEATURE_MON_STATEFUL_SUB (1ULL<<57) /* stateful mon subscription */
#define CEPH_FEATURE_MON_ROUTE_OSDMAP (1ULL<<57) /* peon sends osdmaps */
#define CEPH_FEATURE_CRUSH_TUNABLES5 (1ULL<<58) /* chooseleaf stable mode */
// duplicated since it was introduced at the same time as CEPH_FEATURE_CRUSH_TUNABLES5
#define CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING (1ULL<<58) /* New, v7 encoding */
/*
* The introduction of CEPH_FEATURE_OSD_SNAPMAPPER caused the feature
@@ -108,7 +120,9 @@ static inline u64 ceph_sanitize_features(u64 features)
CEPH_FEATURE_CRUSH_TUNABLES3 | \
CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \
CEPH_FEATURE_MSGR_KEEPALIVE2 | \
-CEPH_FEATURE_CRUSH_V4)
+CEPH_FEATURE_CRUSH_V4 | \
+CEPH_FEATURE_CRUSH_TUNABLES5 | \
+CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING)
#define CEPH_FEATURES_REQUIRED_DEFAULT \
(CEPH_FEATURE_NOSRCADDR | \


@@ -127,6 +127,12 @@ struct cgroup_subsys_state {
*/
u64 serial_nr;
/*
* Incremented by online self and children. Used to guarantee that
* parents are not offlined before their children.
*/
atomic_t online_cnt;
/* percpu_ref killing and RCU release */
struct rcu_head rcu_head;
struct work_struct destroy_work;


@@ -137,6 +137,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
task_unlock(current);
}
extern void cpuset_post_attach_flush(void);
#else /* !CONFIG_CPUSETS */
static inline bool cpusets_enabled(void) { return false; }
@@ -243,6 +245,10 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
return false;
}
static inline void cpuset_post_attach_flush(void)
{
}
#endif /* !CONFIG_CPUSETS */
#endif /* _LINUX_CPUSET_H */


@@ -59,7 +59,8 @@ enum {
CRUSH_RULE_SET_CHOOSELEAF_TRIES = 9, /* override chooseleaf_descend_once */
CRUSH_RULE_SET_CHOOSE_LOCAL_TRIES = 10,
CRUSH_RULE_SET_CHOOSE_LOCAL_FALLBACK_TRIES = 11,
-CRUSH_RULE_SET_CHOOSELEAF_VARY_R = 12
+CRUSH_RULE_SET_CHOOSELEAF_VARY_R = 12,
+CRUSH_RULE_SET_CHOOSELEAF_STABLE = 13
};
/*
@@ -205,6 +206,11 @@ struct crush_map {
* mappings line up a bit better with previous mappings. */
__u8 chooseleaf_vary_r;
/* if true, it makes chooseleaf firstn to return stable results (if
* no local retry) so that data migrations would be optimal when some
* device fails. */
__u8 chooseleaf_stable;
#ifndef __KERNEL__
/*
* version 0 (original) of straw_calc has various flaws. version 1


@@ -14,6 +14,17 @@ int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
dax_iodone_t);
int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
dax_iodone_t);
#ifdef CONFIG_FS_DAX
struct page *read_dax_sector(struct block_device *bdev, sector_t n);
#else
static inline struct page *read_dax_sector(struct block_device *bdev,
sector_t n)
{
return ERR_PTR(-ENXIO);
}
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
unsigned int flags, get_block_t, dax_iodone_t);
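With the stub in place, callers can use read_dax_sector() unconditionally and rely on IS_ERR() when CONFIG_FS_DAX is off. A hedged sketch (the helper name is invented):

#include <linux/dax.h>
#include <linux/err.h>

/* Illustrative caller: returns 0 on success, -ENXIO (from the !FS_DAX
 * stub) or another error code otherwise. */
static int example_peek_sector(struct block_device *bdev, sector_t n)
{
	struct page *page = read_dax_sector(bdev, n);

	if (IS_ERR(page))
		return PTR_ERR(page);
	/* ... inspect the page ... */
	return 0;
}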


@@ -19,6 +19,8 @@
int devpts_new_index(struct inode *ptmx_inode);
void devpts_kill_index(struct inode *ptmx_inode, int idx);
void devpts_add_ref(struct inode *ptmx_inode);
void devpts_del_ref(struct inode *ptmx_inode);
/* mknod in devpts */
struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
void *priv);
@@ -32,6 +34,8 @@ void devpts_pty_kill(struct inode *inode);
/* Dummy stubs in the no-pty case */
static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
static inline void devpts_add_ref(struct inode *ptmx_inode) { }
static inline void devpts_del_ref(struct inode *ptmx_inode) { }
static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
dev_t device, int index, void *priv)
{
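The new devpts_add_ref()/devpts_del_ref() pair lets the pty code pin the devpts superblock for as long as an allocated index is in use. A rough sketch of the intended pairing, with invented function names rather than the actual pty driver code:

#include <linux/devpts_fs.h>

static int example_ptmx_setup(struct inode *ptmx_inode)
{
	int index = devpts_new_index(ptmx_inode);

	if (index < 0)
		return index;
	devpts_add_ref(ptmx_inode);	/* keep devpts alive for this pty */
	/* ... devpts_pty_new(), hand the slave out ... */
	return index;
}

static void example_ptmx_teardown(struct inode *ptmx_inode, int index)
{
	devpts_kill_index(ptmx_inode, index);
	devpts_del_ref(ptmx_inode);	/* drop the reference taken above */
}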


@@ -484,9 +484,6 @@ struct block_device {
int bd_fsfreeze_count;
/* Mutex for freeze */
struct mutex bd_fsfreeze_mutex;
-#ifdef CONFIG_FS_DAX
-int bd_map_count;
-#endif
};
/*
@@ -2907,7 +2904,7 @@ extern void replace_mount_options(struct super_block *sb, char *options);
static inline bool io_is_direct(struct file *filp)
{
-return (filp->f_flags & O_DIRECT) || IS_DAX(file_inode(filp));
+return (filp->f_flags & O_DIRECT) || IS_DAX(filp->f_mapping->host);
}
static inline int iocb_flags(struct file *file)


@@ -547,16 +547,16 @@ static inline bool pm_suspended_storage(void)
}
#endif /* CONFIG_PM_SLEEP */
-#ifdef CONFIG_CMA
+#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range(unsigned long start, unsigned long end,
unsigned migratetype);
extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
+#endif
+#ifdef CONFIG_CMA
/* CMA stuff */
extern void init_cma_reserved_pageblock(struct page *page);
#endif
#endif /* __LINUX_GFP_H */
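alloc_contig_range()/free_contig_range() are now declared whenever MEMORY_ISOLATION plus COMPACTION, or CMA, is enabled, not only for CMA. A hedged sketch of a caller (invented name); both PFNs must lie within a single zone and the end PFN is exclusive:

#include <linux/gfp.h>
#include <linux/mmzone.h>

static int example_grab_contig(unsigned long start_pfn, unsigned long nr_pages)
{
	int ret;

	ret = alloc_contig_range(start_pfn, start_pfn + nr_pages,
				 MIGRATE_MOVABLE);
	if (ret)
		return ret;	/* range busy or could not be isolated */
	/* ... use the physically contiguous pages ... */
	free_contig_range(start_pfn, nr_pages);
	return 0;
}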


@@ -526,6 +526,7 @@ enum ata_lpm_policy {
enum ata_lpm_hints {
ATA_LPM_EMPTY = (1 << 0), /* port empty/probing */
ATA_LPM_HIPM = (1 << 1), /* may use HIPM */
ATA_LPM_WAKE_ONLY = (1 << 2), /* only wake up link */
};
/* forward declarations */


@@ -66,7 +66,7 @@ struct lock_class {
/*
* class-hash:
*/
-struct list_head hash_entry;
+struct hlist_node hash_entry;
/*
* global list of all lock-classes:
@@ -199,7 +199,7 @@ struct lock_chain {
u8 irq_context;
u8 depth;
u16 base;
-struct list_head entry;
+struct hlist_node entry;
u64 chain_key;
};
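Switching hash_entry and entry from list_head to hlist_node halves the size of the hash-table heads while keeping O(1) insertion. A generic sketch of the lookup pattern such a chain uses (table and structure names invented, not lockdep's internals):

#include <linux/list.h>

#define EXAMPLE_HASH_BITS	10
static struct hlist_head example_hash[1 << EXAMPLE_HASH_BITS];

struct example_class {
	unsigned long key;
	struct hlist_node hash_entry;	/* mirrors lock_class::hash_entry */
};

static struct example_class *example_lookup(unsigned long key)
{
	struct hlist_head *head =
		&example_hash[key & ((1 << EXAMPLE_HASH_BITS) - 1)];
	struct example_class *class;

	hlist_for_each_entry(class, head, hash_entry)
		if (class->key == key)
			return class;
	return NULL;
}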


@@ -51,7 +51,7 @@ enum mem_cgroup_stat_index {
MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */
MEM_CGROUP_STAT_NSTATS,
/* default hierarchy stats */
-MEMCG_SOCK,
+MEMCG_SOCK = MEM_CGROUP_STAT_NSTATS,
MEMCG_NR_STAT,
};


@@ -201,11 +201,13 @@ extern unsigned int kobjsize(const void *objp);
#endif
#ifdef CONFIG_STACK_GROWSUP
-#define VM_STACK_FLAGS (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+#define VM_STACK VM_GROWSUP
#else
-#define VM_STACK_FLAGS (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+#define VM_STACK VM_GROWSDOWN
#endif
+#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
/*
* Special vmas that are non-mergable, non-mlock()able.
* Note: mm/huge_memory.c VM_NO_THP depends on this definition.
@@ -1341,8 +1343,7 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma,
!vma_growsup(vma->vm_next, addr);
}
-extern struct task_struct *task_of_stack(struct task_struct *task,
-struct vm_area_struct *vma, bool in_group);
+int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);
extern unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,


@@ -424,9 +424,9 @@ struct mm_struct {
unsigned long total_vm; /* Total pages mapped */
unsigned long locked_vm; /* Pages that have PG_mlocked set */
unsigned long pinned_vm; /* Refcount permanently increased */
-unsigned long data_vm; /* VM_WRITE & ~VM_SHARED/GROWSDOWN */
-unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE */
-unsigned long stack_vm; /* VM_GROWSUP/DOWN */
+unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
+unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
+unsigned long stack_vm; /* VM_STACK */
unsigned long def_flags;
unsigned long start_code, end_code, start_data, end_data;
unsigned long start_brk, brk, start_stack;


@@ -682,6 +682,12 @@ typedef struct pglist_data {
*/
unsigned long first_deferred_pfn;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
spinlock_t split_queue_lock;
struct list_head split_queue;
unsigned long split_queue_len;
#endif
} pg_data_t;
#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)


@@ -324,6 +324,12 @@ struct module_layout {
#define __module_layout_align
#endif
struct mod_kallsyms {
Elf_Sym *symtab;
unsigned int num_symtab;
char *strtab;
};
struct module {
enum module_state state;
@@ -405,15 +411,10 @@ struct module {
#endif
#ifdef CONFIG_KALLSYMS
-/*
-* We keep the symbol and string tables for kallsyms.
-* The core_* fields below are temporary, loader-only (they
-* could really be discarded after module init).
-*/
-Elf_Sym *symtab, *core_symtab;
-unsigned int num_symtab, core_num_syms;
-char *strtab, *core_strtab;
+/* Protected by RCU and/or module_mutex: use rcu_dereference() */
+struct mod_kallsyms *kallsyms;
+struct mod_kallsyms core_kallsyms;
/* Section attributes */
struct module_sect_attrs *sect_attrs;
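The kallsyms tables now sit behind a single pointer that the header comment says to access with rcu_dereference() (or under module_mutex). A hedged sketch of a reader (helper name invented; the module loader uses its own wrappers internally):

#include <linux/module.h>
#include <linux/rcupdate.h>

static unsigned int example_num_symbols(struct module *mod)
{
	struct mod_kallsyms *kallsyms;
	unsigned int num;

	rcu_read_lock();
	kallsyms = rcu_dereference(mod->kallsyms);
	num = kallsyms->num_symtab;
	rcu_read_unlock();
	return num;
}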


@@ -512,7 +512,6 @@ static inline void napi_enable(struct napi_struct *n)
clear_bit(NAPI_STATE_NPSVC, &n->state);
}
-#ifdef CONFIG_SMP
/**
* napi_synchronize - wait until NAPI is not running
* @n: napi context
@@ -523,12 +522,12 @@ static inline void napi_enable(struct napi_struct *n)
*/
static inline void napi_synchronize(const struct napi_struct *n)
{
-while (test_bit(NAPI_STATE_SCHED, &n->state))
-msleep(1);
+if (IS_ENABLED(CONFIG_SMP))
+while (test_bit(NAPI_STATE_SCHED, &n->state))
+msleep(1);
+else
+barrier();
}
-#else
-# define napi_synchronize(n) barrier()
-#endif
enum netdev_queue_state_t {
__QUEUE_STATE_DRV_XOFF,


@@ -929,7 +929,7 @@ static inline int of_get_available_child_count(const struct device_node *np)
return num;
}
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && !defined(MODULE)
#define _OF_DECLARE(table, name, compat, fn, fn_type) \
static const struct of_device_id __of_table_##name \
__used __section(__##table##_of_table) \


@@ -10,7 +10,7 @@
* backing is indicated by flags in the high bits of the value.
*/
typedef struct {
-unsigned long val;
+u64 val;
} pfn_t;
#endif


@@ -9,14 +9,13 @@
* PFN_DEV - pfn is not covered by system memmap by default
* PFN_MAP - pfn has a dynamic page mapping established by a device driver
*/
-#define PFN_FLAGS_MASK (((unsigned long) ~PAGE_MASK) \
-<< (BITS_PER_LONG - PAGE_SHIFT))
-#define PFN_SG_CHAIN (1UL << (BITS_PER_LONG - 1))
-#define PFN_SG_LAST (1UL << (BITS_PER_LONG - 2))
-#define PFN_DEV (1UL << (BITS_PER_LONG - 3))
-#define PFN_MAP (1UL << (BITS_PER_LONG - 4))
+#define PFN_FLAGS_MASK (((u64) ~PAGE_MASK) << (BITS_PER_LONG_LONG - PAGE_SHIFT))
+#define PFN_SG_CHAIN (1ULL << (BITS_PER_LONG_LONG - 1))
+#define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2))
+#define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3))
+#define PFN_MAP (1ULL << (BITS_PER_LONG_LONG - 4))
-static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, unsigned long flags)
+static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, u64 flags)
{
pfn_t pfn_t = { .val = pfn | (flags & PFN_FLAGS_MASK), };
@@ -29,7 +28,7 @@ static inline pfn_t pfn_to_pfn_t(unsigned long pfn)
return __pfn_to_pfn_t(pfn, 0);
}
-extern pfn_t phys_to_pfn_t(dma_addr_t addr, unsigned long flags);
+extern pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags);
static inline bool pfn_t_has_page(pfn_t pfn)
{
@@ -48,7 +47,7 @@ static inline struct page *pfn_t_to_page(pfn_t pfn)
return NULL;
}
-static inline dma_addr_t pfn_t_to_phys(pfn_t pfn)
+static inline phys_addr_t pfn_t_to_phys(pfn_t pfn)
{
return PFN_PHYS(pfn_t_to_pfn(pfn));
}
@@ -87,7 +86,7 @@ static inline pmd_t pfn_t_pmd(pfn_t pfn, pgprot_t pgprot)
#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline bool pfn_t_devmap(pfn_t pfn)
{
-const unsigned long flags = PFN_DEV|PFN_MAP;
+const u64 flags = PFN_DEV|PFN_MAP;
return (pfn.val & flags) == flags;
}
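With val widened to u64, the PFN_* flags occupy the top bits of a 64-bit value even on 32-bit kernels, so large physical addresses no longer collide with them. A small sketch, assuming an architecture where pfn_t_devmap() is backed by __HAVE_ARCH_PTE_DEVMAP (the helper name is invented):

#include <linux/pfn_t.h>

static bool example_make_devmap_pfn(unsigned long pfn)
{
	pfn_t p = __pfn_to_pfn_t(pfn, PFN_DEV | PFN_MAP);

	/* true when both PFN_DEV and PFN_MAP are set (false on arches
	 * without pte devmap support) */
	return pfn_t_devmap(p);
}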


@@ -378,13 +378,29 @@ radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
void **radix_tree_next_chunk(struct radix_tree_root *root,
struct radix_tree_iter *iter, unsigned flags);
/**
* radix_tree_iter_retry - retry this chunk of the iteration
* @iter: iterator state
*
* If we iterate over a tree protected only by the RCU lock, a race
* against deletion or creation may result in seeing a slot for which
* radix_tree_deref_retry() returns true. If so, call this function
* and continue the iteration.
*/
static inline __must_check
void **radix_tree_iter_retry(struct radix_tree_iter *iter)
{
iter->next_index = iter->index;
return NULL;
}
/**
* radix_tree_chunk_size - get current chunk size
*
* @iter: pointer to radix tree iterator
* Returns: current chunk size
*/
-static __always_inline unsigned
+static __always_inline long
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
return iter->next_index - iter->index;
@@ -418,9 +434,9 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
return slot + offset + 1;
}
} else {
-unsigned size = radix_tree_chunk_size(iter) - 1;
+long size = radix_tree_chunk_size(iter);
-while (size--) {
+while (--size > 0) {
slot++;
iter->index++;
if (likely(*slot))
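radix_tree_iter_retry() resets the iterator so an RCU-only walk can restart the current chunk when it races with an insertion or deletion. A hedged sketch of the intended loop shape (counting items is just an example use):

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

static unsigned long example_count_items(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned long count = 0;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, root, &iter, 0) {
		void *item = radix_tree_deref_slot(slot);

		if (radix_tree_deref_retry(item)) {
			/* raced with delete/create: retry this chunk */
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		count++;
	}
	rcu_read_unlock();
	return count;
}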


@@ -152,6 +152,8 @@ void raid6_dual_recov(int disks, size_t bytes, int faila, int failb,
# define jiffies raid6_jiffies()
# define printk printf
# define pr_err(format, ...) fprintf(stderr, format, ## __VA_ARGS__)
# define pr_info(format, ...) fprintf(stdout, format, ## __VA_ARGS__)
# define GFP_KERNEL 0
# define __get_free_pages(x, y) ((unsigned long)mmap(NULL, PAGE_SIZE << (y), \
PROT_READ|PROT_WRITE, \


@@ -109,20 +109,6 @@ static inline void put_anon_vma(struct anon_vma *anon_vma)
__put_anon_vma(anon_vma);
}
-static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
-{
-struct anon_vma *anon_vma = vma->anon_vma;
-if (anon_vma)
-down_write(&anon_vma->root->rwsem);
-}
-static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
-{
-struct anon_vma *anon_vma = vma->anon_vma;
-if (anon_vma)
-up_write(&anon_vma->root->rwsem);
-}
static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
down_write(&anon_vma->root->rwsem);


@@ -299,6 +299,7 @@ struct sk_buff;
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif
extern int sysctl_max_skb_frags;
typedef struct skb_frag_struct skb_frag_t;


@@ -311,6 +311,7 @@ enum {
__WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
__WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
__WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
@@ -411,12 +412,12 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
#define create_workqueue(name) \
-alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, (name))
+alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name) \
-alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \
-1, (name))
+alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND | \
+WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name) \
-alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name)
+alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)
extern void destroy_workqueue(struct workqueue_struct *wq);
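The create_*workqueue() wrappers now tag their queues with the internal __WQ_LEGACY flag; callers are not expected to pass it themselves, and new code normally calls alloc_workqueue() directly. A hedged sketch with invented names:

#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *example_wq;

static void example_work_fn(struct work_struct *work)
{
	/* ... deferred work ... */
}
static DECLARE_WORK(example_work, example_work_fn);

static int example_init(void)
{
	example_wq = alloc_workqueue("example", WQ_MEM_RECLAIM, 0);
	if (!example_wq)
		return -ENOMEM;
	queue_work(example_wq, &example_work);
	return 0;
}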