Merge branch 'sched/rt' into sched/core, to pick up -rt changes
Pick up the first couple of patches working towards PREEMPT_RT.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -181,6 +181,7 @@ struct blkcg_policy {

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;
extern bool blkcg_debug_stats;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
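These declarations are the slow-path half of blkg lookup. A hedged sketch of the calling convention, modeled on how blkg_lookup() used the per-blkcg hint in this era (the wrapper name is illustrative; only blkg_lookup_slowpath() comes from the hunk above):

/* Illustrative wrapper; assumes blkcg->blkg_hint as in this era's blk-cgroup.h. */
static struct blkcg_gq *example_blkg_lookup(struct blkcg *blkcg,
					    struct request_queue *q)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	/* Fast path: the cached lookup hint, valid under RCU. */
	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	/* Slow path: radix-tree walk; don't update the hint from here. */
	return blkg_lookup_slowpath(blkcg, q, false);
}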
@@ -311,6 +311,7 @@ enum req_flag_bits {
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,		/* Don't wait if request will block */
+	__REQ_NOWAIT_INLINE,	/* Return would-block error inline */
	/*
	 * When a shared kthread needs to issue a bio for a cgroup, doing
	 * so synchronously can lead to priority inversions as the kthread
@@ -345,6 +346,7 @@ enum req_flag_bits {
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)
+#define REQ_NOWAIT_INLINE	(1ULL << __REQ_NOWAIT_INLINE)
#define REQ_CGROUP_PUNT		(1ULL << __REQ_CGROUP_PUNT)

#define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
@@ -418,12 +420,13 @@ static inline int op_stat_group(unsigned int op)

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U
+#define BLK_QC_T_EAGAIN		-2U
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
-	return cookie != BLK_QC_T_NONE;
+	return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN;
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
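The two flag hunks and this cookie hunk belong to one feature: with REQ_NOWAIT_INLINE, a would-block condition is reported inline through the returned cookie (BLK_QC_T_EAGAIN) rather than through bio completion, which is why blk_qc_t_valid() must now reject that value as well. A hedged submitter sketch (the helper name is illustrative; the flags and cookie values come from the hunks above):

static int example_submit_nowait(struct bio *bio)
{
	blk_qc_t cookie;

	bio->bi_opf |= REQ_NOWAIT | REQ_NOWAIT_INLINE;
	cookie = submit_bio(bio);

	/* The would-block error comes back inline, not via ->bi_end_io. */
	if (cookie == BLK_QC_T_EAGAIN)
		return -EAGAIN;

	/* Only a blk_qc_t_valid() cookie may be used for polling. */
	return 0;
}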
@@ -145,7 +145,11 @@ struct cred {
	struct user_struct *user;	/* real user ID subscription */
	struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
	struct group_info *group_info;	/* supplementary groups for euid/fsgid */
-	struct rcu_head	rcu;		/* RCU deletion hook */
+	/* RCU deletion */
+	union {
+		int		non_rcu;	/* Can we skip RCU deletion? */
+		struct rcu_head	rcu;		/* RCU deletion hook */
+	};
} __randomize_layout;

extern void __put_cred(struct cred *);
@@ -246,6 +250,7 @@ static inline const struct cred *get_cred(const struct cred *cred)
	if (!cred)
		return cred;
	validate_creds(cred);
+	nonconst_cred->non_rcu = 0;
	return get_new_cred(nonconst_cred);
}

@@ -257,6 +262,7 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred)
	if (!atomic_inc_not_zero(&nonconst_cred->usage))
		return NULL;
	validate_creds(cred);
+	nonconst_cred->non_rcu = 0;
	return cred;
}

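Taken together, the three cred.h hunks implement the non_rcu optimization: a credential that was only ever used subjectively can be freed without waiting out an RCU grace period, and both get_cred() and get_cred_rcu() clear the flag because handing out another reference makes concurrent RCU readers possible again. A simplified sketch of the release side this implies (the real logic lives in kernel/cred.c, where put_cred_rcu() is defined):

/* Simplified illustration of the __put_cred() side of this change. */
static void example_release(struct cred *cred)
{
	if (cred->non_rcu)
		put_cred_rcu(&cred->rcu);		/* no RCU readers possible: free now */
	else
		call_rcu(&cred->rcu, put_cred_rcu);	/* wait out any readers first */
}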
@@ -915,6 +915,8 @@ struct dev_links_info {
 *		This identifies the device type and carries type-specific
 *		information.
 * @mutex:	Mutex to synchronize calls to its driver.
+ * @lockdep_mutex: An optional debug lock that a subsystem can use as a
+ *		peer lock to gain localized lockdep coverage of the device_lock.
 * @bus:	Type of bus device is on.
 * @driver:	Which driver has allocated this
 * @platform_data: Platform data specific to the device.
@@ -998,6 +1000,9 @@ struct device {
					   core doesn't touch it */
	void		*driver_data;	/* Driver data, set and get with
					   dev_set_drvdata/dev_get_drvdata */
+#ifdef CONFIG_PROVE_LOCKING
+	struct mutex		lockdep_mutex;
+#endif
	struct mutex		mutex;	/* mutex to synchronize calls to
					 * its driver.
					 */
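device_lock() is taken in far too many places for lockdep to model it globally, so the new lockdep_mutex gives a subsystem an opt-in shadow lock. A hedged sketch of the peer-lock pattern the kerneldoc describes (wrapper names are illustrative; libnvdimm adopted a variant of this):

static void example_subsys_lock(struct device *dev)
{
	device_lock(dev);
#ifdef CONFIG_PROVE_LOCKING
	mutex_lock(&dev->lockdep_mutex);	/* shadow lock, for lockdep only */
#endif
}

static void example_subsys_unlock(struct device *dev)
{
#ifdef CONFIG_PROVE_LOCKING
	mutex_unlock(&dev->lockdep_mutex);
#endif
	device_unlock(dev);
}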
@@ -1383,6 +1388,7 @@ extern int (*platform_notify_remove)(struct device *dev);
 */
extern struct device *get_device(struct device *dev);
extern void put_device(struct device *dev);
+extern bool kill_device(struct device *dev);

#ifdef CONFIG_DEVTMPFS
extern int devtmpfs_create_node(struct device *dev);
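kill_device() marks a device as no longer fit for use; judging by the bool return, only the first caller "wins", which lets teardown paths be made idempotent. A hedged usage sketch (the cleanup body is illustrative, and the helper is assumed to be called under device_lock()):

static void example_teardown(struct device *dev)
{
	device_lock(dev);
	if (kill_device(dev)) {
		/* We were the first to kill it: do the one-time cleanup. */
		/* ... subsystem-specific teardown ... */
	}
	device_unlock(dev);
}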
@@ -689,8 +689,8 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
 */
static inline bool dma_addressing_limited(struct device *dev)
{
-	return min_not_zero(*dev->dma_mask, dev->bus_dma_mask) <
-		dma_get_required_mask(dev);
+	return min_not_zero(dma_get_mask(dev), dev->bus_dma_mask) <
+		dma_get_required_mask(dev);
}

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
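The fix matters because dev->dma_mask is a pointer that may be NULL; dma_get_mask() falls back to a 32-bit mask in that case instead of dereferencing it. A hedged usage sketch of the kind of decision this helper feeds (the function and allocation policy are illustrative):

static int example_alloc_ring(struct device *dev, size_t size,
			      void **ring, dma_addr_t *dma)
{
	/*
	 * If the device cannot address everything dma_get_required_mask()
	 * reports, steer the allocation low; safe now even when
	 * dev->dma_mask was never set.
	 */
	gfp_t gfp = dma_addressing_limited(dev) ? GFP_KERNEL | GFP_DMA32
						: GFP_KERNEL;

	*ring = dma_alloc_coherent(dev, size, dma, gfp);
	return *ring ? 0 : -ENOMEM;
}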
@@ -45,7 +45,6 @@ struct elevator_mq_ops {
	struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
	bool (*has_work)(struct blk_mq_hw_ctx *);
	void (*completed_request)(struct request *, u64);
-	void (*started_request)(struct request *);
	void (*requeue_request)(struct request *);
	struct request *(*former_request)(struct request_queue *, struct request *);
	struct request *(*next_request)(struct request_queue *, struct request *);
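For context, these hooks are what an mq I/O scheduler registers through its elevator_type. A hedged sketch of the wiring, loosely modeled on mq-deadline (the callbacks are illustrative and their bodies are omitted):

static struct elevator_type example_sched = {
	.ops = {
		.insert_requests	= example_insert_requests,
		.dispatch_request	= example_dispatch_request,
		.has_work		= example_has_work,
		.completed_request	= example_completed_request,	/* (rq, now_ns) */
		.requeue_request	= example_requeue_request,
	},
	.elevator_name	= "example",
	.elevator_owner	= THIS_MODULE,
};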
@@ -155,6 +155,7 @@ struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn);
+bool has_iova_flush_queue(struct iova_domain *iovad);
int init_iova_flush_queue(struct iova_domain *iovad,
			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
@@ -235,6 +236,11 @@ static inline void init_iova_domain(struct iova_domain *iovad,
{
}

+static inline bool has_iova_flush_queue(struct iova_domain *iovad)
+{
+	return false;
+}
+
static inline int init_iova_flush_queue(struct iova_domain *iovad,
					iova_flush_cb flush_cb,
					iova_entry_dtor entry_dtor)
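The new predicate lets callers check whether a domain ever set up a flush queue and avoid queue_iova() when it did not, which is the shape of the iommu/vt-d fix this hunk accompanied. A hedged caller-side sketch (the wrapper is illustrative; queue_iova() and free_iova() are the real iova.h APIs of this era):

static void example_release_iova(struct iova_domain *iovad,
				 unsigned long pfn, unsigned long pages)
{
	if (has_iova_flush_queue(iovad))
		queue_iova(iovad, pfn, pages, 0);	/* batched, flushed later */
	else
		free_iova(iovad, pfn);			/* immediate release */
}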
@@ -814,6 +814,7 @@ struct tee_client_device_id {
/**
 * struct wmi_device_id - WMI device identifier
 * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @context: pointer to driver specific data
 */
struct wmi_device_id {
	const char guid_string[UUID_STRING_LEN+1];
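A hedged sketch of a WMI driver ID table using the newly documented @context member (the GUID is reused from the kerneldoc example above; the context payload and names are illustrative):

static const char example_wmi_ctx[] = "example-data";

static const struct wmi_device_id example_wmi_id_table[] = {
	{
		.guid_string	= "fa50ff2b-f2e8-45de-83fa-65417f2f49ba",
		.context	= example_wmi_ctx,
	},
	{ }
};
MODULE_DEVICE_TABLE(wmi, example_wmi_id_table);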
@@ -1164,7 +1164,7 @@ static inline int of_property_read_string_index(const struct device_node *np,
}

/**
- * of_property_read_bool - Findfrom a property
+ * of_property_read_bool - Find a property
 * @np: device node from which the property value is to be read.
 * @propname: name of the property to be searched.
 *
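Usage is a one-liner; the helper only tests for the property's existence (the property name below is illustrative):

static bool example_wakeup_capable(const struct device_node *np)
{
	/* True if the node carries the property at all; no value is read. */
	return of_property_read_bool(np, "wakeup-source");
}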
@@ -182,7 +182,7 @@ do { \

#define preemptible()	(preempt_count() == 0 && !irqs_disabled())

-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
#define preempt_enable() \
do { \
	barrier(); \
@@ -203,7 +203,7 @@ do { \
		__preempt_schedule(); \
} while (0)

-#else /* !CONFIG_PREEMPT */
+#else /* !CONFIG_PREEMPTION */
#define preempt_enable() \
do { \
	barrier(); \
@@ -217,7 +217,7 @@ do { \
} while (0)

#define preempt_check_resched() do { } while (0)
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */

#define preempt_disable_notrace() \
do { \
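All three preempt.h hunks only rename the guard; behavior is unchanged. For reference, the construct they guard: under CONFIG_PREEMPTION the closing preempt_enable() re-checks for a pending reschedule via __preempt_schedule() (the per-CPU counter below is illustrative):

static DEFINE_PER_CPU(unsigned long, example_events);

static void example_count_event(void)
{
	preempt_disable();		/* pin this task to the current CPU */
	__this_cpu_inc(example_events);
	preempt_enable();		/* may call __preempt_schedule() here */
}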
@@ -578,7 +578,7 @@ do { \
 *
 * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU),
 * it is illegal to block while in an RCU read-side critical section.
- * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPT
+ * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPTION
 * kernel builds, RCU read-side critical sections may be preempted,
 * but explicit blocking is illegal.  Finally, in preemptible RCU
 * implementations in real-time (with -rt patchset) kernel builds, RCU
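A minimal reader illustrating the restated rule: under PREEMPT_RCU the section below may be preempted, but it must never block (the data structure is illustrative):

struct example_node {
	int payload;
	struct list_head list;
	struct rcu_head rcu;
};

static int example_sum(struct list_head *head)
{
	struct example_node *n;
	int sum = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(n, head, list)
		sum += n->payload;	/* no sleeping allowed here */
	rcu_read_unlock();

	return sum;
}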
@@ -53,7 +53,7 @@ void rcu_scheduler_starting(void);
extern int rcu_scheduler_active __read_mostly;
void rcu_end_inkernel_boot(void);
bool rcu_is_watching(void);
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
void rcu_all_qs(void);
#endif

@@ -1772,7 +1772,7 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling,
 */
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
extern int _cond_resched(void);
#else
static inline int _cond_resched(void) { return 0; }
@@ -1801,12 +1801,12 @@ static inline void cond_resched_rcu(void)

/*
 * Does a critical section need to be broken due to another
- * task waiting?: (technically does not depend on CONFIG_PREEMPT,
+ * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
 * but a general need for low latency)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
	return spin_is_contended(lock);
#else
	return 0;
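spin_needbreak() exists for the classic lock-break pattern: on CONFIG_PREEMPTION builds it reports contention so a long-running lock holder can yield (the loop body below is illustrative):

static void example_drain(spinlock_t *lock, struct list_head *head)
{
	spin_lock(lock);
	while (!list_empty(head)) {
		struct list_head *entry = head->next;

		list_del(entry);
		/* ... process one entry ... */

		if (spin_needbreak(lock) || need_resched()) {
			spin_unlock(lock);
			cond_resched();		/* let the waiter or preemption in */
			spin_lock(lock);
		}
	}
	spin_unlock(lock);
}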
@@ -214,7 +214,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)

/*
 * Define the various spin_lock methods.  Note we define these
- * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPTION are set. The
 * various methods are defined as nops in the case they are not
 * required.
 */
@@ -96,7 +96,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
- * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
+ * even on CONFIG_PREEMPTION, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
@@ -86,7 +86,7 @@ void _torture_stop_kthread(char *m, struct task_struct **tp);
#define torture_stop_kthread(n, tp) \
	_torture_stop_kthread("Stopping " #n " task", &(tp))

-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
#define torture_preempt_schedule() preempt_schedule()
#else
#define torture_preempt_schedule()
@@ -126,6 +126,19 @@ static inline int waitqueue_active(struct wait_queue_head *wq_head)
	return !list_empty(&wq_head->head);
}

+/**
+ * wq_has_single_sleeper - check if there is only one sleeper
+ * @wq_head: wait queue head
+ *
+ * Returns true of wq_head has only one sleeper on the list.
+ *
+ * Please refer to the comment for waitqueue_active.
+ */
+static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
+{
+	return list_is_singular(&wq_head->head);
+}
+
/**
 * wq_has_sleeper - check if there are any waiting processes
 * @wq_head: wait queue head
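Like waitqueue_active(), the new helper is racy without memory barriers of its own; the comment it points to spells out the required ordering. A hedged sketch of the kind of single-waiter fast path it enables (the wake policy and names are illustrative):

static void example_wake(struct wait_queue_head *wq, bool *done)
{
	smp_store_mb(*done, true);	/* publish the condition first */

	if (wq_has_single_sleeper(wq))
		wake_up(wq);		/* one sleeper: a single wake suffices */
	else if (wq_has_sleeper(wq))
		wake_up_all(wq);	/* otherwise wake everyone */
}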