Merge commit '3cf2f34' into sched/core, to fix build error
Fix this dependency on the locking tree's smp_mb*() API changes:

  kernel/sched/idle.c:247:3: error: implicit declaration of function ‘smp_mb__after_atomic’ [-Werror=implicit-function-declaration]

Signed-off-by: Ingo Molnar <mingo@kernel.org>
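The underlying API change collapses the operation-specific barriers
(smp_mb__before/after_atomic_inc(), _atomic_dec(), _clear_bit()) into a
single pair, smp_mb__before_atomic()/smp_mb__after_atomic(), usable around
any non-value-returning atomic RMW op or bitop. A minimal before/after
sketch of a call site (hypothetical refcount field, mirroring the put_bh()
hunk below):

	/* old API: one barrier helper per operation */
	smp_mb__before_atomic_dec();
	atomic_dec(&obj->ref);

	/* new API: one helper pair for all RMW atomics and bitops */
	smp_mb__before_atomic();
	atomic_dec(&obj->ref);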
@@ -3,6 +3,42 @@
 #define _LINUX_ATOMIC_H
 #include <asm/atomic.h>
 
+/*
+ * Provide __deprecated wrappers for the new interface, avoid flag day changes.
+ * We need the ugly external functions to break header recursion hell.
+ */
+#ifndef smp_mb__before_atomic_inc
+static inline void __deprecated smp_mb__before_atomic_inc(void)
+{
+	extern void __smp_mb__before_atomic(void);
+	__smp_mb__before_atomic();
+}
+#endif
+
+#ifndef smp_mb__after_atomic_inc
+static inline void __deprecated smp_mb__after_atomic_inc(void)
+{
+	extern void __smp_mb__after_atomic(void);
+	__smp_mb__after_atomic();
+}
+#endif
+
+#ifndef smp_mb__before_atomic_dec
+static inline void __deprecated smp_mb__before_atomic_dec(void)
+{
+	extern void __smp_mb__before_atomic(void);
+	__smp_mb__before_atomic();
+}
+#endif
+
+#ifndef smp_mb__after_atomic_dec
+static inline void __deprecated smp_mb__after_atomic_dec(void)
+{
+	extern void __smp_mb__after_atomic(void);
+	__smp_mb__after_atomic();
+}
+#endif
+
 /**
  * atomic_add_unless - add unless the number is already a given value
  * @v: pointer of type atomic_t
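The block-scope extern declaration is the "ugly external function" the
comment refers to: it lets this header call the real barrier without
including the header that defines it, which would recurse back into this
file. A compressed illustration of the pattern (toy names, not kernel
code):

	/* a.h: needs a helper defined behind b.h, but b.h includes a.h */
	static inline void helper_wrapper(void)
	{
		extern void real_helper(void);	/* declaration, no #include */
		real_helper();			/* resolved at link time */
	}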
@@ -32,6 +32,26 @@ extern unsigned long __sw_hweight64(__u64 w);
  */
 #include <asm/bitops.h>
 
+/*
+ * Provide __deprecated wrappers for the new interface, avoid flag day changes.
+ * We need the ugly external functions to break header recursion hell.
+ */
+#ifndef smp_mb__before_clear_bit
+static inline void __deprecated smp_mb__before_clear_bit(void)
+{
+	extern void __smp_mb__before_atomic(void);
+	__smp_mb__before_atomic();
+}
+#endif
+
+#ifndef smp_mb__after_clear_bit
+static inline void __deprecated smp_mb__after_clear_bit(void)
+{
+	extern void __smp_mb__after_atomic(void);
+	__smp_mb__after_atomic();
+}
+#endif
+
 #define for_each_set_bit(bit, addr, size) \
 	for ((bit) = find_first_bit((addr), (size)); \
 	     (bit) < (size); \
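Both wrapper sets are marked __deprecated, which expands to the compiler's
deprecated attribute: old call sites keep compiling, but each use emits a
warning, so the tree can be converted piecemeal instead of in one flag-day
sweep. Roughly (simplified sketch; the kernel defines __deprecated in its
compiler headers):

	#define __deprecated __attribute__((deprecated))

	static inline void new_name(void) { /* ... */ }
	static inline void __deprecated old_name(void) { new_name(); }

	/* a call to old_name() now triggers, e.g. with gcc:
	 *   warning: 'old_name' is deprecated [-Wdeprecated-declarations]
	 */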
@@ -278,7 +278,7 @@ static inline void get_bh(struct buffer_head *bh)
 
 static inline void put_bh(struct buffer_head *bh)
 {
-	smp_mb__before_atomic_dec();
+	smp_mb__before_atomic();
 	atomic_dec(&bh->b_count);
 }
 
@@ -649,7 +649,7 @@ static inline void hd_ref_init(struct hd_struct *part)
 static inline void hd_struct_get(struct hd_struct *part)
 {
 	atomic_inc(&part->ref);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 }
 
 static inline int hd_struct_try_get(struct hd_struct *part)
@@ -491,7 +491,7 @@ static inline int tasklet_trylock(struct tasklet_struct *t)
 
 static inline void tasklet_unlock(struct tasklet_struct *t)
 {
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(TASKLET_STATE_RUN, &(t)->state);
 }
 
@@ -539,7 +539,7 @@ static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
 static inline void tasklet_disable_nosync(struct tasklet_struct *t)
 {
 	atomic_inc(&t->count);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 }
 
 static inline void tasklet_disable(struct tasklet_struct *t)
@@ -551,13 +551,13 @@ static inline void tasklet_disable(struct tasklet_struct *t)
 
 static inline void tasklet_enable(struct tasklet_struct *t)
 {
-	smp_mb__before_atomic_dec();
+	smp_mb__before_atomic();
 	atomic_dec(&t->count);
 }
 
 static inline void tasklet_hi_enable(struct tasklet_struct *t)
 {
-	smp_mb__before_atomic_dec();
+	smp_mb__before_atomic();
 	atomic_dec(&t->count);
 }
 
@@ -493,7 +493,7 @@ static inline void napi_disable(struct napi_struct *n)
 static inline void napi_enable(struct napi_struct *n)
 {
 	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(NAPI_STATE_SCHED, &n->state);
 }
 
@@ -2795,10 +2795,8 @@ static inline bool __must_check current_set_polling_and_test(void)
 	/*
 	 * Polling state must be visible before we test NEED_RESCHED,
 	 * paired by resched_task()
-	 *
-	 * XXX: assumes set/clear bit are identical barrier wise.
 	 */
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	return unlikely(tif_need_resched());
 }
@@ -2816,7 +2814,7 @@ static inline bool __must_check current_clr_polling_and_test(void)
 	 * Polling state must be visible before we test NEED_RESCHED,
 	 * paired by resched_task()
 	 */
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	return unlikely(tif_need_resched());
 }
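Both polling hunks rely on the pairing the comments describe: each side
publishes its own bit, issues a full barrier via the atomic-op helper, and
only then tests the other side's bit, so at least one of the two racing
CPUs observes the other. A schematic with hypothetical flag names (not the
kernel's exact code; resched_task() was the waker-side function in this
era):

	#define POLLING_FLAG		0
	#define NEED_RESCHED_FLAG	1
	static unsigned long thread_flags;
	static void send_resched_ipi(void) { /* kick the remote CPU (stub) */ }

	static void idle_side(void)		/* cf. current_set_polling_and_test() */
	{
		set_bit(POLLING_FLAG, &thread_flags);
		smp_mb__after_atomic();		/* publish POLLING before the test */
		if (test_bit(NEED_RESCHED_FLAG, &thread_flags))
			return;			/* resched already requested */
		/* ... otherwise poll on the flags word, no IPI needed ... */
	}

	static void waker_side(void)		/* cf. resched_task() */
	{
		set_bit(NEED_RESCHED_FLAG, &thread_flags);
		smp_mb__after_atomic();		/* pairs with the idle side's barrier */
		if (!test_bit(POLLING_FLAG, &thread_flags))
			send_resched_ipi();	/* target is not polling: kick it */
	}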
@@ -142,18 +142,18 @@ struct rpc_task_setup {
 		test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
 #define rpc_clear_running(t)	\
 	do { \
-		smp_mb__before_clear_bit(); \
+		smp_mb__before_atomic(); \
 		clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \
-		smp_mb__after_clear_bit(); \
+		smp_mb__after_atomic(); \
 	} while (0)
 
 #define RPC_IS_QUEUED(t)	test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
 #define rpc_set_queued(t)	set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
 #define rpc_clear_queued(t)	\
 	do { \
-		smp_mb__before_clear_bit(); \
+		smp_mb__before_atomic(); \
 		clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate); \
-		smp_mb__after_clear_bit(); \
+		smp_mb__after_atomic(); \
 	} while (0)
 
 #define RPC_IS_ACTIVATED(t)	test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)
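Note that the RPC macros, unlike the simpler call sites above, fence the
clear_bit() on both sides: the before-barrier orders the task's earlier
stores before the RUNNING/QUEUED bit drops, and the after-barrier orders
the clear before whatever the caller reads next. The do { ... } while (0)
wrapper is the standard idiom for making a multi-statement macro behave as
a single statement; a deliberately broken variant shows why:

	/* broken variant, for illustration only */
	#define rpc_clear_running_bad(t)				\
		smp_mb__before_atomic();				\
		clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate);		\
		smp_mb__after_atomic()

	/*
	 * if (done)
	 *	rpc_clear_running_bad(task);
	 *
	 * expands so that only the first barrier is conditional; the
	 * clear_bit() and the trailing barrier always run, and a following
	 * 'else' no longer parses.
	 */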
@@ -379,9 +379,9 @@ static inline int xprt_test_and_clear_connected(struct rpc_xprt *xprt)
 
 static inline void xprt_clear_connecting(struct rpc_xprt *xprt)
 {
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(XPRT_CONNECTING, &xprt->state);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 }
 
 static inline int xprt_connecting(struct rpc_xprt *xprt)
@@ -411,9 +411,9 @@ static inline void xprt_clear_bound(struct rpc_xprt *xprt)
 
 static inline void xprt_clear_binding(struct rpc_xprt *xprt)
 {
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(XPRT_BINDING, &xprt->state);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 }
 
 static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt)
@@ -191,7 +191,7 @@ static inline void tracehook_notify_resume(struct pt_regs *regs)
 	 * pairs with task_work_add()->set_notify_resume() after
 	 * hlist_add_head(task->task_works);
 	 */
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 	if (unlikely(current->task_works))
 		task_work_run();
 }
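The tracehook hunk follows the same publish/consume pairing as the polling
changes, per the comment in the hunk: task_work_add() first queues the work
item, then set_notify_resume() raises TIF_NOTIFY_RESUME; the resume path
clears the flag, issues the full barrier, and only then inspects
current->task_works, so work queued before the flag went down cannot be
missed. Condensed from the hunk's own comment (not the verbatim kernel
code; queue_work_item() is a placeholder for the list insertion):

	/* producer: task_work_add() -> set_notify_resume() */
	queue_work_item(task);			/* 1. publish the work   */
	set_notify_resume(task);		/* 2. raise the flag     */

	/* consumer: tracehook_notify_resume() */
	clear_thread_flag(TIF_NOTIFY_RESUME);	/* 1. take the flag down        */
	smp_mb__after_atomic();			/* 2. clear before reading list */
	if (unlikely(current->task_works))	/* 3. cannot miss step-1 work   */
		task_work_run();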