locking/percpu-rwsem: Fold __percpu_up_read()

Now that __percpu_up_read() is only ever used from percpu_up_read(),
merge the two; it's a small function.

Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Waiman Long <longman@redhat.com>
Link: https://lkml.kernel.org/r/20200131151540.212415454@infradead.org
commit ac8dec4209 (parent bcba67cd80)
Author:     Davidlohr Bueso <dave@stgolabs.net>
AuthorDate: 2019-11-18 15:19:35 -08:00
Committer:  Ingo Molnar
3 changed files with 16 additions and 19 deletions

--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h

@@ -43,7 +43,6 @@ is_static struct percpu_rw_semaphore name = { \
 	__DEFINE_PERCPU_RWSEM(name, static)
 
 extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool);
-extern void __percpu_up_read(struct percpu_rw_semaphore *);
 
 static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
 {
@@ -103,10 +102,22 @@ static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
 	/*
 	 * Same as in percpu_down_read().
 	 */
-	if (likely(rcu_sync_is_idle(&sem->rss)))
+	if (likely(rcu_sync_is_idle(&sem->rss))) {
 		__this_cpu_dec(*sem->read_count);
-	else
-		__percpu_up_read(sem); /* Unconditional memory barrier */
+	} else {
+		/*
+		 * slowpath; reader will only ever wake a single blocked
+		 * writer.
+		 */
+		smp_mb(); /* B matches C */
+		/*
+		 * In other words, if they see our decrement (presumably to
+		 * aggregate zero, as that is the only time it matters) they
+		 * will also see our critical section.
+		 */
+		__this_cpu_dec(*sem->read_count);
+		rcuwait_wake_up(&sem->writer);
+	}
 	preempt_enable();
 }
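For readers unfamiliar with the barrier pairing in the slowpath above, here is
a minimal user-space sketch of the same fast-path/slow-path release pattern
using C11 atomics. The names reader_count, writer_waiting and wake_writer()
are hypothetical stand-ins for the kernel's per-CPU read_count, the rcu_sync
state and rcuwait_wake_up(); the seq_cst decrement only approximates the
kernel's smp_mb() + __this_cpu_dec() pair. This is an illustration of the
ordering argument, not the kernel implementation.

/* sketch.c -- user-space analogue of the folded percpu_up_read().
 * Build: cc -std=c11 sketch.c
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins: reader_count plays the role of the aggregated
 * per-CPU read_count, writer_waiting the role of
 * "!rcu_sync_is_idle(&sem->rss)". */
static atomic_int  reader_count   = 1;    /* this reader is inside */
static atomic_bool writer_waiting = true; /* a writer is blocked */

static void wake_writer(void)
{
	/* Stand-in for rcuwait_wake_up(&sem->writer); the real writer
	 * re-checks the reader count itself after waking, so the wakeup
	 * can be issued unconditionally, as in the patch. */
	puts("waking the single blocked writer");
}

static void up_read_sketch(void)
{
	if (!atomic_load_explicit(&writer_waiting, memory_order_relaxed)) {
		/* Fast path: no writer pending, a plain decrement suffices
		 * (the kernel uses a preemption-disabled per-CPU decrement). */
		atomic_fetch_sub_explicit(&reader_count, 1,
					  memory_order_relaxed);
	} else {
		/* Slow path: a full barrier orders the read-side critical
		 * section before the decrement ("B matches C"), so a writer
		 * that sees the count reach zero also sees everything this
		 * reader did. A seq_cst RMW approximates the kernel's
		 * smp_mb() followed by __this_cpu_dec(). */
		atomic_fetch_sub_explicit(&reader_count, 1,
					  memory_order_seq_cst);
		wake_writer();
	}
}

int main(void)
{
	up_read_sketch();
	return 0;
}

The design point the fold preserves: the fast path stays a branch plus a
per-CPU decrement inlined in the header, while the slowpath (barrier plus
wakeup) is small enough that keeping it as a separate out-of-line
__percpu_up_read() no longer paid its way.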