Merge keystone/android12-5.10-keystone-qcom-release.117+ (26604a5) into msm-5.10

* refs/heads/tmp-26604a5:
  UPSTREAM: usb: dwc3: gadget: Avoid duplicate requests to enable Run/Stop
  UPSTREAM: usb: typec: ucsi: Acknowledge the GET_ERROR_STATUS command completion
  BACKPORT: scsi: ufs: core: Increase fDeviceInit poll frequency
  FROMGIT: f2fs: increase the limit for reserve_root
  FROMGIT: f2fs: complete checkpoints during remount
  FROMGIT: f2fs: flush pending checkpoints when freezing super
  BACKPORT: f2fs: don't get FREEZE lock in f2fs_evict_inode in frozen fs
  BACKPORT: f2fs: introduce F2FS_IPU_HONOR_OPU_WRITE ipu policy
  Revert "ANDROID: GKI: signal: Export for __lock_task_sighand"
  BACKPORT: f2fs: invalidate meta pages only for post_read required inode
  BACKPORT: f2fs: fix to invalidate META_MAPPING before DIO write
  BACKPORT: f2fs: invalidate META_MAPPING before IPU/DIO write
  ANDROID: mm: page_pinner: use page_ext_get/put() to work with page_ext
  FROMLIST: mm: fix use-after free of page_ext after race with memory-offline
  ANDROID: vendor_hooks:vendor hook for __alloc_pages_slowpath.
  ANDROID: GKI: rockchip: add symbol netif_set_xps_queue
  ANDROID: GKI: Update symbol list
  Revert "ANDROID: vendor_hooks: tune reclaim scan type for specified mem_cgroup"
  ANDROID: Fix a build warning inside early_memblock_nomap
  ANDROID: mm/memory_hotplug: Fix error path handling
  Revert "ANDROID: add for tuning readahead size"
  Revert "ANDROID: vendor_hooks: Add hooks for mutex"
  ANDROID: fix execute bit on android/abi_gki_aarch64_asus
  ANDROID: avoid huge-page not to clear trylock-bit after shrink_page_list.
  ANDROID: vendor_hooks: Add hooks for oem futex optimization
  ANDROID: mm: memblock: avoid to create memmap for memblock nomap regions
  ANDROID: abi_gki_aarch64_qcom: Add android_vh_disable_thermal_cooling_stats
  ANDROID: thermal: vendor hook to disable thermal cooling stats
  ANDROID: GKI: Update symbols to symbol list
  ANDROID: GKI: rockchip: update fragment file
  ANDROID: GKI: rockchip: Enable symbols bcmdhd-sdio
  ANDROID: GKI: rockchip: Update symbols for rga driver
  BACKPORT: cgroup: Fix threadgroup_rwsem <-> cpus_read_lock() deadlock
  UPSTREAM: cgroup: Elide write-locking threadgroup_rwsem when updating csses on an empty subtree
  ANDROID: GKI: Update symbol list for transsion
  ANDROID: vendor_hook: Add hook in __free_pages()
  ANDROID: vendor_hooks: Add hooks to extend the struct swap_info_struct
  ANDROID: vendor_hook: Add hook in si_swapinfo()
  ANDROID: GKI: Update symbols to symbol list
  ANDROID: Use rq_clock_task without CONFIG_SMP
  ANDROID: abi_gki_aarch64_qcom: Add skb and scatterlist helpers
  Revert "ANDROID: vendor_hook: Add hook in si_swapinfo()"
  Revert "ANDROID: vendor_hooks:vendor hook for pidfd_open"
  Revert "ANDROID: vendor_hooks: Add hooks to extend the struct swap_info_struct"
  Revert "ANDROID: vendor_hooks:vendor hook for mmput"
  ANDROID: GKI: Update symbols to symbol list
  ANDROID: Guard rq_clock_task_mult with CONFIG_SMP
  Revert "ANDROID: vendor_hook: Add hook in __free_pages()"
  Revert "ANDROID: vendor_hooks: Add hooks for binder"
  ANDROID: vendor_hook: add hooks to protect locking-tsk in cpu scheduler
  ANDROID: export reclaim_pages
  ANDROID: vendor_hook: Add hook to not be stuck ro rmap lock in kswapd or direct_reclaim

Change-Id: Id29a9448f424508e3b3e82c4f69959fa9da81699
Signed-off-by: Sivasri Kumar, Vanka <quic_svanka@quicinc.com>
Author: Sivasri Kumar, Vanka
Date: 2022-09-28 17:44:38 +05:30
55 changed files with 2153 additions and 929 deletions

kernel/cgroup/cgroup.c

@@ -2328,6 +2328,47 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
 }
 EXPORT_SYMBOL_GPL(task_cgroup_path);
 
+/**
+ * cgroup_attach_lock - Lock for ->attach()
+ * @lock_threadgroup: whether to down_write cgroup_threadgroup_rwsem
+ *
+ * cgroup migration sometimes needs to stabilize threadgroups against forks and
+ * exits by write-locking cgroup_threadgroup_rwsem. However, some ->attach()
+ * implementations (e.g. cpuset), also need to disable CPU hotplug.
+ * Unfortunately, letting ->attach() operations acquire cpus_read_lock() can
+ * lead to deadlocks.
+ *
+ * Bringing up a CPU may involve creating new tasks which requires read-locking
+ * threadgroup_rwsem, so threadgroup_rwsem nests inside cpus_read_lock(). If we
+ * call an ->attach() which acquires the cpus lock while write-locking
+ * threadgroup_rwsem, the locking order is reversed and we end up waiting for an
+ * on-going CPU hotplug operation which in turn is waiting for the
+ * threadgroup_rwsem to be released to create new tasks. For more details:
+ *
+ * http://lkml.kernel.org/r/20220711174629.uehfmqegcwn2lqzu@wubuntu
+ *
+ * Resolve the situation by always acquiring cpus_read_lock() before optionally
+ * write-locking cgroup_threadgroup_rwsem. This allows ->attach() to assume that
+ * CPU hotplug is disabled on entry.
+ */
+static void cgroup_attach_lock(bool lock_threadgroup)
+{
+	cpus_read_lock();
+	if (lock_threadgroup)
+		percpu_down_write(&cgroup_threadgroup_rwsem);
+}
+
+/**
+ * cgroup_attach_unlock - Undo cgroup_attach_lock()
+ * @lock_threadgroup: whether to up_write cgroup_threadgroup_rwsem
+ */
+static void cgroup_attach_unlock(bool lock_threadgroup)
+{
+	if (lock_threadgroup)
+		percpu_up_write(&cgroup_threadgroup_rwsem);
+	cpus_read_unlock();
+}
+
 /**
  * cgroup_migrate_add_task - add a migration target task to a migration context
  * @task: target task
@@ -2812,9 +2853,8 @@ int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
 }
 
 struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
-					     bool *locked,
+					     bool *threadgroup_locked,
 					     struct cgroup *dst_cgrp)
-	__acquires(&cgroup_threadgroup_rwsem)
 {
 	struct task_struct *tsk;
 	pid_t pid;
@@ -2832,12 +2872,8 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
 	 * Therefore, we can skip the global lock.
 	 */
 	lockdep_assert_held(&cgroup_mutex);
-	if (pid || threadgroup) {
-		percpu_down_write(&cgroup_threadgroup_rwsem);
-		*locked = true;
-	} else {
-		*locked = false;
-	}
+	*threadgroup_locked = pid || threadgroup;
+	cgroup_attach_lock(*threadgroup_locked);
 
 	rcu_read_lock();
 	if (pid) {
@@ -2871,17 +2907,14 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
 	goto out_unlock_rcu;
 
 out_unlock_threadgroup:
-	if (*locked) {
-		percpu_up_write(&cgroup_threadgroup_rwsem);
-		*locked = false;
-	}
+	cgroup_attach_unlock(*threadgroup_locked);
+	*threadgroup_locked = false;
 out_unlock_rcu:
 	rcu_read_unlock();
 	return tsk;
 }
 
-void cgroup_procs_write_finish(struct task_struct *task, bool locked)
-	__releases(&cgroup_threadgroup_rwsem)
+void cgroup_procs_write_finish(struct task_struct *task, bool threadgroup_locked)
 {
 	struct cgroup_subsys *ss;
 	int ssid;
@@ -2889,8 +2922,8 @@ void cgroup_procs_write_finish(struct task_struct *task, bool locked)
 	/* release reference from cgroup_procs_write_start() */
 	put_task_struct(task);
 
-	if (locked)
-		percpu_up_write(&cgroup_threadgroup_rwsem);
+	cgroup_attach_unlock(threadgroup_locked);
+
 	for_each_subsys(ss, ssid)
 		if (ss->post_attach)
 			ss->post_attach();
@@ -2945,12 +2978,11 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
 	struct cgroup_subsys_state *d_css;
 	struct cgroup *dsct;
 	struct ext_css_set *ext_src_set;
+	bool has_tasks;
 	int ret;
 
 	lockdep_assert_held(&cgroup_mutex);
 
-	percpu_down_write(&cgroup_threadgroup_rwsem);
-
 	/* look up all csses currently attached to @cgrp's subtree */
 	spin_lock_irq(&css_set_lock);
 	cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
@@ -2961,6 +2993,15 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
 	}
 	spin_unlock_irq(&css_set_lock);
 
+	/*
+	 * We need to write-lock threadgroup_rwsem while migrating tasks.
+	 * However, if there are no source csets for @cgrp, changing its
+	 * controllers isn't gonna produce any task migrations and the
+	 * write-locking can be skipped safely.
+	 */
+	has_tasks = !list_empty(&mgctx.preloaded_src_csets);
+	cgroup_attach_lock(has_tasks);
+
 	/* NULL dst indicates self on default hierarchy */
 	ret = cgroup_migrate_prepare_dst(&mgctx);
 	if (ret)
@@ -2980,7 +3021,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
 	ret = cgroup_migrate_execute(&mgctx);
 out_finish:
 	cgroup_migrate_finish(&mgctx);
-	percpu_up_write(&cgroup_threadgroup_rwsem);
+	cgroup_attach_unlock(has_tasks);
 
 	return ret;
 }
@@ -4855,13 +4896,13 @@ static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
 	struct task_struct *task;
 	const struct cred *saved_cred;
 	ssize_t ret;
-	bool locked;
+	bool threadgroup_locked;
 
 	dst_cgrp = cgroup_kn_lock_live(of->kn, false);
 	if (!dst_cgrp)
 		return -ENODEV;
 
-	task = cgroup_procs_write_start(buf, true, &locked, dst_cgrp);
+	task = cgroup_procs_write_start(buf, true, &threadgroup_locked, dst_cgrp);
 	ret = PTR_ERR_OR_ZERO(task);
 	if (ret)
 		goto out_unlock;
@@ -4887,7 +4928,7 @@ static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
 	ret = cgroup_attach_task(dst_cgrp, task, true);
 
 out_finish:
-	cgroup_procs_write_finish(task, locked);
+	cgroup_procs_write_finish(task, threadgroup_locked);
 out_unlock:
 	cgroup_kn_unlock(of->kn);
@@ -4907,7 +4948,7 @@ static ssize_t cgroup_threads_write(struct kernfs_open_file *of,
 	struct task_struct *task;
 	const struct cred *saved_cred;
 	ssize_t ret;
-	bool locked;
+	bool threadgroup_locked;
 
 	buf = strstrip(buf);
@@ -4915,7 +4956,7 @@ static ssize_t cgroup_threads_write(struct kernfs_open_file *of,
 	if (!dst_cgrp)
 		return -ENODEV;
 
-	task = cgroup_procs_write_start(buf, false, &locked, dst_cgrp);
+	task = cgroup_procs_write_start(buf, false, &threadgroup_locked, dst_cgrp);
 	ret = PTR_ERR_OR_ZERO(task);
 	if (ret)
 		goto out_unlock;
@@ -4941,7 +4982,7 @@ static ssize_t cgroup_threads_write(struct kernfs_open_file *of,
 	ret = cgroup_attach_task(dst_cgrp, task, false);
 
 out_finish:
-	cgroup_procs_write_finish(task, locked);
+	cgroup_procs_write_finish(task, threadgroup_locked);
 out_unlock:
 	cgroup_kn_unlock(of->kn);
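
The cgroup.c hunks above all serve one fix: take cpus_read_lock() before optionally write-locking cgroup_threadgroup_rwsem, so the attach path can never invert the lock order that CPU hotplug uses. A minimal standalone sketch of the two orderings (illustrative C, not kernel source; attach_work() is a hypothetical stand-in for an ->attach() callback such as cpuset's):

#include <linux/cpu.h>
#include <linux/percpu-rwsem.h>

static void attach_work(void) { }	/* hypothetical ->attach() body */

/*
 * Broken order (before the fix): write-lock threadgroup_rwsem first, then
 * let ->attach() take cpus_read_lock(). Hotplug holds the cpus lock while
 * forking new tasks, and fork read-locks threadgroup_rwsem -> AB-BA deadlock.
 *
 * Fixed order (cgroup_attach_lock() above): cpus lock always comes first.
 */
static void attach_fixed_order(bool lock_threadgroup)
{
	cpus_read_lock();			/* 1: disable CPU hotplug */
	if (lock_threadgroup)			/* 2: then freeze forks/exits */
		percpu_down_write(&cgroup_threadgroup_rwsem);

	attach_work();	/* may assume hotplug is disabled on entry */

	if (lock_threadgroup)
		percpu_up_write(&cgroup_threadgroup_rwsem);
	cpus_read_unlock();
}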

kernel/cgroup/cpuset.c

@@ -2238,7 +2238,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
 	cgroup_taskset_first(tset, &css);
 	cs = css_cs(css);
 
-	cpus_read_lock();
+	lockdep_assert_cpus_held();	/* see cgroup_attach_lock() */
 	mutex_lock(&cpuset_mutex);
 
 	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
@@ -2292,7 +2292,6 @@ static void cpuset_attach(struct cgroup_taskset *tset)
 	wake_up(&cpuset_attach_wq);
 
 	mutex_unlock(&cpuset_mutex);
-	cpus_read_unlock();
 }
 
 /* The various types of files and directories in a cpuset file system */

kernel/fork.c

@@ -1150,10 +1150,8 @@ void mmput(struct mm_struct *mm)
 {
 	might_sleep();
 
-	if (atomic_dec_and_test(&mm->mm_users)) {
-		trace_android_vh_mmput(NULL);
+	if (atomic_dec_and_test(&mm->mm_users))
 		__mmput(mm);
-	}
 }
 EXPORT_SYMBOL_GPL(mmput);

kernel/futex.c

@@ -1594,6 +1594,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
 	struct futex_q *this, *next;
 	union futex_key key = FUTEX_KEY_INIT;
 	int ret;
+	int target_nr;
 	DEFINE_WAKE_Q(wake_q);
 
 	if (!bitset)
@@ -1611,6 +1612,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
 
 	spin_lock(&hb->lock);
 
+	trace_android_vh_futex_wake_traverse_plist(&hb->chain, &target_nr, key, bitset);
 	plist_for_each_entry_safe(this, next, &hb->chain, list) {
 		if (match_futex (&this->key, &key)) {
 			if (this->pi_state || this->rt_waiter) {
@@ -1622,6 +1624,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
 			if (!(this->bitset & bitset))
 				continue;
 
+			trace_android_vh_futex_wake_this(ret, nr_wake, target_nr, this->task);
 			mark_wake_futex(&wake_q, this);
 			if (++ret >= nr_wake)
 				break;
@@ -1630,6 +1633,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
 
 	spin_unlock(&hb->lock);
 	wake_up_q(&wake_q);
+	trace_android_vh_futex_wake_up_q_finish(nr_wake, target_nr);
 	return ret;
 }
@@ -2699,6 +2703,7 @@ static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
 	if (!bitset)
 		return -EINVAL;
 	q.bitset = bitset;
 
+	trace_android_vh_futex_wait_start(flags, bitset);
 	to = futex_setup_timer(abs_time, &timeout, flags,
 			       current->timer_slack_ns);
@@ -2748,6 +2753,7 @@ out:
 		hrtimer_cancel(&to->timer);
 		destroy_hrtimer_on_stack(&to->timer);
 	}
+	trace_android_vh_futex_wait_end(flags, bitset);
 	return ret;
 }
@@ -3733,6 +3739,7 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
 		return -ENOSYS;
 	}
 
+	trace_android_vh_do_futex(cmd, &flags, uaddr2);
 	switch (cmd) {
 	case FUTEX_WAIT:
 		val3 = FUTEX_BITSET_MATCH_ANY;
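
The futex changes only add vendor hook call sites; the hooks are no-ops until a vendor module attaches a probe. A sketch of how such a module might hook do_futex — the header name and registration call follow the usual ACK DECLARE_HOOK conventions, and the probe signature is inferred from the call site trace_android_vh_do_futex(cmd, &flags, uaddr2) above, so treat both as assumptions:

#include <linux/module.h>
#include <linux/types.h>
#include <trace/hooks/futex.h>	/* assumed ACK header declaring the hooks */

/*
 * Vendor-hook probes take a leading void *data cookie, then the arguments
 * from the call site: (cmd, &flags, uaddr2) as passed in do_futex() above.
 */
static void oem_do_futex(void *data, int cmd, unsigned int *flags,
			 u32 __user *uaddr2)
{
	/* e.g. tweak wake-up policy per cmd before the switch runs */
}

static int __init oem_futex_hooks_init(void)
{
	return register_trace_android_vh_do_futex(oem_do_futex, NULL);
}
module_init(oem_futex_hooks_init);
MODULE_LICENSE("GPL");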

kernel/locking/mutex.c

@@ -170,8 +170,10 @@ static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
 	unsigned long curr = (unsigned long)current;
 	unsigned long zero = 0UL;
 
-	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
+	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr)) {
+		trace_android_vh_record_mutex_lock_starttime(current, jiffies);
 		return true;
+	}
 
 	return false;
 }
@@ -748,6 +750,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
  */
 void __sched mutex_unlock(struct mutex *lock)
 {
+	trace_android_vh_record_mutex_lock_starttime(current, 0);
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
 	if (__mutex_unlock_fast(lock))
 		return;
@@ -978,6 +981,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		lock_acquired(&lock->dep_map, ip);
 		if (ww_ctx)
 			ww_mutex_set_context_fastpath(ww, ww_ctx);
+		trace_android_vh_record_mutex_lock_starttime(current, jiffies);
 		preempt_enable();
 		return 0;
 	}
@@ -1049,7 +1053,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			goto err;
 		}
 
-		trace_android_vh_mutex_start_check_new_owner(lock);
 		spin_unlock(&lock->wait_lock);
 		schedule_preempt_disabled();
@@ -1097,6 +1100,7 @@ skip_wait:
 	spin_unlock(&lock->wait_lock);
 
 	preempt_enable();
+	trace_android_vh_record_mutex_lock_starttime(current, jiffies);
 	return 0;
 
 err:
@@ -1433,8 +1437,10 @@ int __sched mutex_trylock(struct mutex *lock)
 #endif
 
 	locked = __mutex_trylock(lock);
-	if (locked)
+	if (locked) {
+		trace_android_vh_record_mutex_lock_starttime(current, jiffies);
 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+	}
 
 	return locked;
 }

kernel/locking/percpu-rwsem.c

@@ -10,6 +10,21 @@
 #include <linux/slab.h>
 #include <linux/errno.h>
 
 #include <trace/hooks/dtask.h>
+
+/*
+ * trace_android_vh_record_percpu_rwsem_lock_starttime is called in
+ * include/linux/percpu-rwsem.h by including include/hooks/dtask.h, which
+ * will result to build-err. So we create
+ * func:_trace_android_vh_record_percpu_rwsem_lock_starttime for percpu-rwsem.h to call.
+ */
+void _trace_android_vh_record_percpu_rwsem_lock_starttime(struct task_struct *tsk,
+		unsigned long settime)
+{
+	trace_android_vh_record_percpu_rwsem_lock_starttime(tsk, settime);
+}
+EXPORT_SYMBOL_GPL(_trace_android_vh_record_percpu_rwsem_lock_starttime);
+
 int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
 			const char *name, struct lock_class_key *key)
 {
@@ -237,11 +252,13 @@ void percpu_down_write(struct percpu_rw_semaphore *sem)
 
 	/* Wait for all active readers to complete. */
 	rcuwait_wait_event(&sem->writer, readers_active_check(sem), TASK_UNINTERRUPTIBLE);
+	trace_android_vh_record_percpu_rwsem_lock_starttime(current, jiffies);
 }
 EXPORT_SYMBOL_GPL(percpu_down_write);
 
 void percpu_up_write(struct percpu_rw_semaphore *sem)
 {
+	trace_android_vh_record_percpu_rwsem_lock_starttime(current, 0);
 	rwsem_release(&sem->dep_map, _RET_IP_);
 
 	/*
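
The _trace_...() wrapper above exists only to break an include dependency: as its comment notes, include/linux/percpu-rwsem.h cannot pull in trace/hooks/dtask.h without a build error, so the header-side inline paths call an exported C function instead. A sketch of the header side under that assumption (fast path elided, not the verbatim ACK header):

/* include/linux/percpu-rwsem.h side (assumed shape) */
extern void _trace_android_vh_record_percpu_rwsem_lock_starttime(
		struct task_struct *tsk, unsigned long settime);

static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
{
	/* ...existing inline acquisition elided... */

	/* header cannot include trace/hooks/dtask.h, so call the wrapper */
	_trace_android_vh_record_percpu_rwsem_lock_starttime(current, jiffies);
}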

kernel/locking/rtmutex.c

@@ -1471,6 +1471,7 @@ static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
 	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
 	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
+	trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
 }
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -1519,6 +1520,8 @@ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
 	ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
 	if (ret)
 		mutex_release(&lock->dep_map, _RET_IP_);
+	else
+		trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
 
 	return ret;
 }
@@ -1563,6 +1566,8 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
 			       rt_mutex_slowlock);
 	if (ret)
 		mutex_release(&lock->dep_map, _RET_IP_);
+	else
+		trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
 
 	return ret;
 }
@@ -1589,6 +1594,8 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock)
 	ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
 	if (ret)
 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+	else
+		trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
 
 	return ret;
 }
@@ -1603,6 +1610,7 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock)
 {
 	mutex_release(&lock->dep_map, _RET_IP_);
 	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
+	trace_android_vh_record_rtmutex_lock_starttime(current, 0);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_unlock);

kernel/locking/rwsem.c

@@ -279,6 +279,10 @@ static inline bool rwsem_read_trylock(struct rw_semaphore *sem)
 	long cnt = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
 
 	if (WARN_ON_ONCE(cnt < 0))
 		rwsem_set_nonspinnable(sem);
+
+	if ((cnt & RWSEM_READ_FAILED_MASK) == 0)
+		trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
+
 	return !(cnt & RWSEM_READ_FAILED_MASK);
 }
@@ -1021,9 +1025,11 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
 			raw_spin_unlock_irq(&sem->wait_lock);
 			wake_up_q(&wake_q);
 		}
+		trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
 		return sem;
 	} else if (rwsem_reader_phase_trylock(sem, waiter.last_rowner)) {
 		/* rwsem_reader_phase_trylock() implies ACQUIRE on success */
+		trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
 		return sem;
 	}
@@ -1104,6 +1110,7 @@ queue:
 	__set_current_state(TASK_RUNNING);
 	trace_android_vh_rwsem_read_wait_finish(sem);
 	lockevent_inc(rwsem_rlock);
+	trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
 	return sem;
 
 out_nolock:
@@ -1150,6 +1157,7 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
 	if (rwsem_can_spin_on_owner(sem, RWSEM_WR_NONSPINNABLE) &&
 	    rwsem_optimistic_spin(sem, true)) {
 		/* rwsem_optimistic_spin() implies ACQUIRE on success */
+		trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
 		return sem;
 	}
@@ -1280,7 +1288,7 @@ trylock_again:
 	rwsem_disable_reader_optspin(sem, disable_rspin);
 	raw_spin_unlock_irq(&sem->wait_lock);
 	lockevent_inc(rwsem_wlock);
-
+	trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
 	return ret;
 
 out_nolock:
@@ -1396,6 +1404,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 		if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
 						    tmp + RWSEM_READER_BIAS)) {
 			rwsem_set_reader_owned(sem);
+			trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
 			return 1;
 		}
 	} while (!(tmp & RWSEM_READ_FAILED_MASK));
@@ -1410,10 +1419,12 @@ static inline void __down_write(struct rw_semaphore *sem)
 	long tmp = RWSEM_UNLOCKED_VALUE;
 
 	if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
-						      RWSEM_WRITER_LOCKED)))
+						      RWSEM_WRITER_LOCKED))) {
 		rwsem_down_write_slowpath(sem, TASK_UNINTERRUPTIBLE);
-	else
+	} else {
+		trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
 		rwsem_set_owner(sem);
+	}
 }
@@ -1425,6 +1436,7 @@ static inline int __down_write_killable(struct rw_semaphore *sem)
 		if (IS_ERR(rwsem_down_write_slowpath(sem, TASK_KILLABLE)))
 			return -EINTR;
 	} else {
+		trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
 		rwsem_set_owner(sem);
 	}
 	return 0;
@@ -1440,6 +1452,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
 	if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
 					    RWSEM_WRITER_LOCKED)) {
 		rwsem_set_owner(sem);
+		trace_android_vh_record_rwsem_lock_starttime(current, jiffies);
 		return true;
 	}
 	return false;
@@ -1455,6 +1468,7 @@ static inline void __up_read(struct rw_semaphore *sem)
 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
 	DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
 
+	trace_android_vh_record_rwsem_lock_starttime(current, 0);
 	rwsem_clear_reader_owned(sem);
 	tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
 	DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
@@ -1481,6 +1495,7 @@ static inline void __up_write(struct rw_semaphore *sem)
 	DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
 			     !rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);
 
+	trace_android_vh_record_rwsem_lock_starttime(current, 0);
 	rwsem_clear_owner(sem);
 	tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
 	if (unlikely(tmp & RWSEM_FLAG_WAITERS))
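
Across mutex, rtmutex, percpu-rwsem, and rwsem, the record_*_lock_starttime hooks follow one protocol: settime is jiffies at acquisition and 0 at release. That lets a single vendor probe watch for locks held too long. A sketch of such a probe — the per-task slot and the 20 ms threshold are assumptions; ACK reserves android_vendor_data fields in task_struct for exactly this kind of vendor state:

#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/sched.h>

static void oem_record_lock_starttime(void *data, struct task_struct *tsk,
				      unsigned long settime)
{
	unsigned long start = tsk->android_vendor_data1[0]; /* assumed free slot */

	if (settime) {			/* lock acquired: remember when */
		tsk->android_vendor_data1[0] = settime;
	} else if (start) {		/* lock released: check hold time */
		unsigned long held = jiffies - start;

		if (held > msecs_to_jiffies(20))	/* assumed threshold */
			pr_warn_ratelimited("%s held a lock for %u ms\n",
					    tsk->comm, jiffies_to_msecs(held));
		tsk->android_vendor_data1[0] = 0;
	}
}

A module would attach it with register_trace_android_vh_record_rwsem_lock_starttime(oem_record_lock_starttime, NULL), and likewise for the mutex, rtmutex, and percpu-rwsem variants, which share the (task, settime) signature shown in the percpu-rwsem wrapper above.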

kernel/pid.c

@@ -45,9 +45,6 @@
 #include <net/sock.h>
 #include <uapi/linux/pidfd.h>
 
-#undef CREATE_TRACE_POINTS
-#include <trace/hooks/sched.h>
-
 struct pid init_struct_pid = {
 	.count		= REFCOUNT_INIT(1),
 	.tasks		= {
@@ -605,7 +602,6 @@ SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
 	else
 		fd = -EINVAL;
 
-	trace_android_vh_pidfd_open(p);
 	put_pid(p);
 	return fd;
 }

kernel/signal.c

@@ -1400,7 +1400,6 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
 
 	return sighand;
 }
-EXPORT_SYMBOL_GPL(__lock_task_sighand);
 
 /*
  * send signal info to all the members of a group