ANDROID: vendor_hooks: Add hooks for mutex and rwsem optimistic spin

These hooks help us do the following things:
a) Record the number of mutex and rwsem optimistic spin.
b) Monitor the time of mutex and rwsem optimistic spin.
c) Make it possible for OEMs to stop mutex and rwsem optimistic spinning
when it has been running for a long time.

Bug: 267565260
Change-Id: I2bee30fb17946be85e026213b481aeaeaee2459f
Signed-off-by: Liujie Xie <xieliujie@oppo.com>
This commit is contained in:
Liujie Xie
2023-02-14 14:42:26 +08:00
committed by Todd Kjos
parent d4d05c6e6e
commit d01f7e1269
4 changed files with 47 additions and 0 deletions

View File

@@ -122,6 +122,9 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_wait_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_wait_finish);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rtmutex_wait_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rtmutex_wait_finish);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_opt_spin_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_opt_spin_finish);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_can_spin_on_owner);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_read_wait_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_read_wait_finish);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_write_wait_start);
@@ -131,6 +134,9 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_set_reader_owned);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_mark_wake_readers);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_up_read_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_up_write_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_opt_spin_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_opt_spin_finish);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_can_spin_on_owner);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sched_show_task);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shmem_alloc_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpu_idle_enter);

View File

@@ -32,6 +32,15 @@ DECLARE_HOOK(android_vh_mutex_wait_start,
DECLARE_HOOK(android_vh_mutex_wait_finish,
TP_PROTO(struct mutex *lock),
TP_ARGS(lock));
/*
 * Invoked on each iteration of the mutex optimistic-spin loop.
 * @cnt: per-spin counter the hook can use to track how long the lock
 *       has been spinning (persists across iterations of one spin).
 * @time_out: set to true by the hook to make the caller abort spinning.
 */
DECLARE_HOOK(android_vh_mutex_opt_spin_start,
TP_PROTO(struct mutex *lock, bool *time_out, int *cnt),
TP_ARGS(lock, time_out, cnt));
/*
 * Invoked when the optimistic-spin attempt ends.
 * @taken: true if the spin succeeded and the mutex was acquired,
 *         false on the failure path.
 */
DECLARE_HOOK(android_vh_mutex_opt_spin_finish,
TP_PROTO(struct mutex *lock, bool taken),
TP_ARGS(lock, taken));
/*
 * Invoked after mutex_can_spin_on_owner() has computed its result;
 * the hook may override the decision through @retval.
 */
DECLARE_HOOK(android_vh_mutex_can_spin_on_owner,
TP_PROTO(struct mutex *lock, int *retval),
TP_ARGS(lock, retval));
DECLARE_HOOK(android_vh_rtmutex_wait_start,
TP_PROTO(struct rt_mutex *lock),
@@ -52,6 +61,15 @@ DECLARE_HOOK(android_vh_rwsem_write_wait_start,
DECLARE_HOOK(android_vh_rwsem_write_wait_finish,
TP_PROTO(struct rw_semaphore *sem),
TP_ARGS(sem));
/*
 * Invoked on each iteration of the rwsem optimistic-spin loops.
 * @cnt: per-spin counter the hook can use to track spin duration.
 * @time_out: set to true by the hook to make the caller stop spinning.
 * @chk_only: true when called from rwsem_spin_on_owner() (owner check
 *            loop), false when called from rwsem_optimistic_spin().
 */
DECLARE_HOOK(android_vh_rwsem_opt_spin_start,
TP_PROTO(struct rw_semaphore *sem, bool *time_out, int *cnt, bool chk_only),
TP_ARGS(sem, time_out, cnt, chk_only));
/*
 * Invoked when the rwsem optimistic-spin attempt ends.
 * @taken: true if the semaphore was acquired by spinning.
 * @wlock: true for a writer-lock attempt, false for a reader.
 */
DECLARE_HOOK(android_vh_rwsem_opt_spin_finish,
TP_PROTO(struct rw_semaphore *sem, bool taken, bool wlock),
TP_ARGS(sem, taken, wlock));
/*
 * Invoked after rwsem_can_spin_on_owner() has computed its result;
 * the hook may override the decision through @ret.
 * @wlock: true when the check is for a writer-lock attempt.
 */
DECLARE_HOOK(android_vh_rwsem_can_spin_on_owner,
TP_PROTO(struct rw_semaphore *sem, bool *ret, bool wlock),
TP_ARGS(sem, ret, wlock));
DECLARE_HOOK(android_vh_sched_show_task,
TP_PROTO(struct task_struct *task),

View File

@@ -568,9 +568,16 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
bool ret = true;
int cnt = 0;
bool time_out = false;
rcu_read_lock();
while (__mutex_owner(lock) == owner) {
trace_android_vh_mutex_opt_spin_start(lock, &time_out, &cnt);
if (time_out) {
ret = false;
break;
}
/*
* Ensure we emit the owner->on_cpu, dereference _after_
* checking lock->owner still matches owner. If that fails,
@@ -621,6 +628,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
if (owner)
retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
rcu_read_unlock();
trace_android_vh_mutex_can_spin_on_owner(lock, &retval);
/*
* If lock->owner is not set, the mutex has been released. Return true
@@ -702,6 +710,7 @@ mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
if (!waiter)
osq_unlock(&lock->osq);
trace_android_vh_mutex_opt_spin_finish(lock, true);
return true;
@@ -710,6 +719,7 @@ fail_unlock:
osq_unlock(&lock->osq);
fail:
trace_android_vh_mutex_opt_spin_finish(lock, false);
/*
* If we fell out of the spin path because of need_resched(),
* reschedule now, before we try-lock the mutex. This avoids getting

View File

@@ -673,6 +673,7 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
ret = false;
rcu_read_unlock();
preempt_enable();
trace_android_vh_rwsem_can_spin_on_owner(sem, &ret, nonspinnable == RWSEM_WR_NONSPINNABLE);
lockevent_cond_inc(rwsem_opt_fail, !ret);
return ret;
@@ -715,6 +716,8 @@ rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
struct task_struct *new, *owner;
unsigned long flags, new_flags;
enum owner_state state;
int cnt = 0;
bool time_out = false;
owner = rwsem_owner_flags(sem, &flags);
state = rwsem_owner_state(owner, flags, nonspinnable);
@@ -723,6 +726,9 @@ rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
rcu_read_lock();
for (;;) {
trace_android_vh_rwsem_opt_spin_start(sem, &time_out, &cnt, true);
if (time_out)
break;
/*
* When a waiting writer set the handoff flag, it may spin
* on the owner as well. Once that writer acquires the lock,
@@ -786,6 +792,8 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
int prev_owner_state = OWNER_NULL;
int loop = 0;
u64 rspin_threshold = 0;
int cnt = 0;
bool time_out = false;
unsigned long nonspinnable = wlock ? RWSEM_WR_NONSPINNABLE
: RWSEM_RD_NONSPINNABLE;
@@ -804,6 +812,10 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
for (;;) {
enum owner_state owner_state;
trace_android_vh_rwsem_opt_spin_start(sem, &time_out, &cnt, false);
if (time_out)
break;
owner_state = rwsem_spin_on_owner(sem, nonspinnable);
if (!(owner_state & OWNER_SPINNABLE))
break;
@@ -898,6 +910,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
cpu_relax();
}
osq_unlock(&sem->osq);
trace_android_vh_rwsem_opt_spin_finish(sem, taken, wlock);
done:
preempt_enable();
lockevent_cond_inc(rwsem_opt_fail, !taken);