Merge branch 'linus' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/backing-dev-defs.h
@@ -22,7 +22,6 @@ struct dentry;
  */
 enum wb_state {
 	WB_registered,		/* bdi_register() was done */
-	WB_shutting_down,	/* wb_shutdown() in progress */
 	WB_writeback_running,	/* Writeback is in progress */
 	WB_has_dirty_io,	/* Dirty inodes on ->b_{dirty|io|more_io} */
 	WB_start_all,		/* nr_pages == 0 (all) work pending */
@@ -189,6 +188,7 @@ struct backing_dev_info {
 #ifdef CONFIG_CGROUP_WRITEBACK
 	struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
 	struct rb_root cgwb_congested_tree; /* their congested states */
+	struct mutex cgwb_release_mutex;  /* protect shutdown of wb structs */
 #else
 	struct bdi_writeback_congested *wb_congested;
 #endif
include/linux/filter.h
@@ -472,7 +472,9 @@ struct sock_fprog_kern {
 struct bpf_binary_header {
 	u16 pages;
 	u16 locked:1;
-	u8 image[];
+
+	/* Some arches need word alignment for their instructions */
+	u8 image[] __aligned(4);
 };
 
 struct bpf_prog {
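For context on the alignment change above: JITs copy fixed-width instruction words straight into image[], so the flexible array must be word-aligned regardless of the members that precede it. A rough illustrative sketch (emit_insn() is hypothetical, not part of this commit):

	static void emit_insn(struct bpf_binary_header *hdr, unsigned int idx, u32 opcode)
	{
		u32 *insns = (u32 *)hdr->image;	/* cast is safe: image[] is __aligned(4) */

		insns[idx] = opcode;		/* store one 32-bit instruction word */
	}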
include/linux/irq.h
@@ -503,6 +503,7 @@ struct irq_chip {
  * IRQCHIP_SKIP_SET_WAKE:	Skip chip.irq_set_wake(), for this irq chip
  * IRQCHIP_ONESHOT_SAFE:	One shot does not require mask/unmask
  * IRQCHIP_EOI_THREADED:	Chip requires eoi() on unmask in threaded mode
+ * IRQCHIP_SUPPORTS_LEVEL_MSI	Chip can provide two doorbells for Level MSIs
  */
 enum {
 	IRQCHIP_SET_TYPE_MASKED		= (1 << 0),
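An irqchip driver opts in to the new capability through the flags field of its struct irq_chip. A minimal illustrative sketch (the chip name and callbacks are made up):

	static struct irq_chip my_msi_chip = {
		.name		= "my-msi",
		.irq_mask	= my_msi_mask,			/* hypothetical callbacks */
		.irq_unmask	= my_msi_unmask,
		.flags		= IRQCHIP_SUPPORTS_LEVEL_MSI,	/* assert/deassert doorbells */
	};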
include/linux/irqdesc.h
@@ -145,11 +145,6 @@ static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
 	return desc->irq_common_data.handler_data;
 }
 
-static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
-{
-	return desc->irq_common_data.msi_desc;
-}
-
 /*
  * Architectures call this to let the generic IRQ layer
  * handle an interrupt.
include/linux/kernel.h
@@ -666,7 +666,7 @@ do { \
  * your code. (Extra memory is used for special buffers that are
  * allocated when trace_printk() is used.)
  *
- * A little optization trick is done here. If there's only one
+ * A little optimization trick is done here. If there's only one
  * argument, there's no need to scan the string for printf formats.
  * The trace_puts() will suffice. But how can we take advantage of
  * using trace_puts() when trace_printk() has only one argument?
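The trick the comment describes, in terms of the two call shapes (an illustration, not code from this commit):

	trace_printk("entering fast path\n");	/* single literal: can become trace_puts() */
	trace_printk("got %u pages\n", nr);	/* format args: takes the full printf path */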
include/linux/memory.h
@@ -38,6 +38,7 @@ struct memory_block {
 
 int arch_get_memory_phys_device(unsigned long start_pfn);
 unsigned long memory_block_size_bytes(void);
+int set_memory_block_size_order(unsigned int order);
 
 /* These states are exposed to userspace as text strings in sysfs */
 #define	MEM_ONLINE		(1<<0)	/* exposed to userspace */
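The new hook lets an architecture choose the granularity of the memory blocks exposed under /sys/devices/system/memory; the block size is 1UL << order. A sketch of a caller, with the order value picked purely for illustration; it must run before the memory sysfs devices are created:

	static void __init setup_block_size(void)
	{
		if (set_memory_block_size_order(31))	/* 1UL << 31 == 2 GiB blocks */
			pr_warn("could not set memory block size\n");
	}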
include/linux/nfs_xdr.h
@@ -1438,6 +1438,8 @@ enum {
 	NFS_IOHDR_EOF,
 	NFS_IOHDR_REDO,
 	NFS_IOHDR_STAT,
+	NFS_IOHDR_RESEND_PNFS,
+	NFS_IOHDR_RESEND_MDS,
 };
 
 struct nfs_io_completion;
include/linux/refcount.h
@@ -98,5 +98,7 @@ extern __must_check bool refcount_dec_if_one(refcount_t *r);
 extern __must_check bool refcount_dec_not_one(refcount_t *r);
 extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
 extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
-
+extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
+						       spinlock_t *lock,
+						       unsigned long *flags);
 #endif /* _LINUX_REFCOUNT_H */
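Usage mirrors refcount_dec_and_lock(): the function returns true with the lock held (and the IRQ state saved into *flags) only when it dropped the count to zero. A minimal sketch; struct obj and its fields are assumed for illustration:

	struct obj {
		refcount_t	 ref;
		spinlock_t	 lock;	/* protects the list linkage */
		struct list_head node;
	};

	static void obj_put(struct obj *o)
	{
		unsigned long flags;

		if (refcount_dec_and_lock_irqsave(&o->ref, &o->lock, &flags)) {
			list_del(&o->node);
			spin_unlock_irqrestore(&o->lock, flags);
			kfree(o);
		}
	}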
include/linux/sched.h
@@ -1799,20 +1799,22 @@ static inline void rseq_set_notify_resume(struct task_struct *t)
 		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
 }
 
-void __rseq_handle_notify_resume(struct pt_regs *regs);
+void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
 
-static inline void rseq_handle_notify_resume(struct pt_regs *regs)
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+					     struct pt_regs *regs)
 {
 	if (current->rseq)
-		__rseq_handle_notify_resume(regs);
+		__rseq_handle_notify_resume(ksig, regs);
 }
 
-static inline void rseq_signal_deliver(struct pt_regs *regs)
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+				       struct pt_regs *regs)
 {
 	preempt_disable();
 	__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
 	preempt_enable();
-	rseq_handle_notify_resume(regs);
+	rseq_handle_notify_resume(ksig, regs);
 }
 
 /* rseq_preempt() requires preemption to be disabled. */
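With the ksignal threaded through, an architecture's signal-delivery path now looks roughly like this (a generic sketch, not any particular arch's code):

	static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
	{
		/* Let rseq fix up or abort a restartable sequence first;
		 * passing ksig lets it raise SIGSEGV against this signal
		 * context without recursing.
		 */
		rseq_signal_deliver(ksig, regs);

		/* ... arch-specific signal frame setup follows ... */
	}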
@@ -1831,9 +1833,7 @@ static inline void rseq_migrate(struct task_struct *t)
 
 /*
  * If parent process has a registered restartable sequences area, the
- * child inherits. Only applies when forking a process, not a thread. In
- * case a parent fork() in the middle of a restartable sequence, set the
- * resume notifier to force the child to retry.
+ * child inherits. Only applies when forking a process, not a thread.
  */
 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
 {
@@ -1847,7 +1847,6 @@ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
 		t->rseq = current->rseq;
 		t->rseq_len = current->rseq_len;
 		t->rseq_sig = current->rseq_sig;
 		t->rseq_event_mask = current->rseq_event_mask;
-		rseq_preempt(t);
 	}
 }
@@ -1864,10 +1863,12 @@ static inline void rseq_execve(struct task_struct *t)
 static inline void rseq_set_notify_resume(struct task_struct *t)
 {
 }
-static inline void rseq_handle_notify_resume(struct pt_regs *regs)
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+					     struct pt_regs *regs)
 {
 }
-static inline void rseq_signal_deliver(struct pt_regs *regs)
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+				       struct pt_regs *regs)
 {
 }
 static inline void rseq_preempt(struct task_struct *t)
include/linux/spinlock.h
@@ -427,6 +427,11 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
 #define atomic_dec_and_lock(atomic, lock) \
 		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
 
+extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
+					unsigned long *flags);
+#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
+		__cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
+
 int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
 			   size_t max_size, unsigned int cpu_mult,
 			   gfp_t gfp);
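The macro form takes the caller's flags variable directly (it passes &(flags) through to the helper), returning nonzero with the lock held and IRQs disabled only on the final reference drop. Usage is analogous to the refcount_t example above (struct entry and its members are assumed):

	static void entry_put(struct entry *e)
	{
		unsigned long flags;

		if (atomic_dec_and_lock_irqsave(&e->refcnt, &e->lock, flags)) {
			hlist_del(&e->node);
			spin_unlock_irqrestore(&e->lock, flags);
			kfree(e);
		}
	}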