Merge remote-tracking branch 'keystone/mirror-android12-5.10-2022-05' into HEAD

* keystone/mirror-android12-5.10-2022-05: (135 commits)
  BACKPORT: can: usb_8dev: usb_8dev_start_xmit(): fix double dev_kfree_skb() in error path
  ANDROID: GKI: Update symbols to symbol list
  ANDROID: oplus: Update the ABI xml and symbol list
  UPSTREAM: remoteproc: Fix count check in rproc_coredump_write()
  BACKPORT: esp: Fix possible buffer overflow in ESP transformation
  ANDROID: Fix the drain_all_pages default condition broken by a hook
  UPSTREAM: Revert "xfrm: xfrm_state_mtu should return at least 1280 for ipv6"
  UPSTREAM: xfrm: fix MTU regression
  ANDROID: signal: Add vendor hook for memory reaping
  FROMGIT: usb: gadget: uvc: allow for application to cleanly shutdown
  FROMGIT: usb: dwc3: gadget: increase tx fifo size for ss isoc endpoints
  UPSTREAM: usb: gadget: configfs: clear deactivation flag in configfs_composite_unbind()
  FROMGIT: usb: gadget: uvc: remove pause flag use
  FROMGIT: usb: gadget: uvc: allow changing interface name via configfs
  UPSTREAM: usb: gadget: uvc: Fix crash when encoding data for usb request
  UPSTREAM: usb: gadget: uvc: test if ep->desc is valid on ep_queue
  UPSTREAM: usb: gadget: uvc: only pump video data if necessary
  UPSTREAM: usb: gadget: uvc: only schedule stream in streaming state
  UPSTREAM: usb: dwc3: gadget: Give some time to schedule isoc
  UPSTREAM: usb: gadget: uvc: make uvc_num_requests depend on gadget speed
  ...

Change-Id: I438ffbf5441deb75dadf2150e235232bc53c37ea
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -1293,6 +1293,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
 	spin_unlock(&lock->wait_lock);
 
 	wake_up_q(&wake_q);
+	trace_android_vh_mutex_unlock_slowpath_end(lock, next);
 }
 
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
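The trace_android_vh_* call added above is an Android GKI vendor hook: a tracepoint-backed attachment point that a vendor module can probe without modifying GKI code. A minimal registration sketch, assuming the hook is declared in the Android tree's trace/hooks/dtask.h with TP_PROTO(struct mutex *lock, struct task_struct *next) as the call site suggests (the probe body and module boilerplate are illustrative, not part of this merge):

#include <linux/module.h>
#include <trace/hooks/dtask.h>

/* Probes receive the registration cookie first, then the TP_PROTO args. */
static void probe_unlock_slowpath_end(void *unused, struct mutex *lock,
				      struct task_struct *next)
{
	/* Runs as a tracepoint probe, so it must not sleep. */
}

static int __init vh_demo_init(void)
{
	/* Returns 0 on success; the probe stays attached until unregistered. */
	return register_trace_android_vh_mutex_unlock_slowpath_end(
			probe_unlock_slowpath_end, NULL);
}
module_init(vh_demo_init);
MODULE_LICENSE("GPL");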
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -176,6 +176,7 @@
 static inline void rwsem_set_owner(struct rw_semaphore *sem)
 {
 	atomic_long_set(&sem->owner, (long)current);
+	trace_android_vh_rwsem_set_owner(sem);
 }
 
 static inline void rwsem_clear_owner(struct rw_semaphore *sem)
@@ -213,6 +214,7 @@ static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
 static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
 {
 	__rwsem_set_reader_owned(sem, current);
+	trace_android_vh_rwsem_set_reader_owned(sem);
 }
 
 /*
@@ -496,6 +498,7 @@ static void rwsem_mark_wake(struct rw_semaphore *sem,
 		woken++;
 		list_move_tail(&waiter->list, &wlist);
 
+		trace_android_vh_rwsem_mark_wake_readers(sem, waiter);
 		/*
 		 * Limit # of readers that can be woken up per wakeup call.
 		 */
@@ -1460,6 +1463,7 @@ static inline void __up_read(struct rw_semaphore *sem)
 		clear_wr_nonspinnable(sem);
 		rwsem_wake(sem, tmp);
 	}
+	trace_android_vh_rwsem_up_read_end(sem);
 }
 
 /*
@@ -1481,6 +1485,7 @@ static inline void __up_write(struct rw_semaphore *sem)
 	tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
 	if (unlikely(tmp & RWSEM_FLAG_WAITERS))
 		rwsem_wake(sem, tmp);
+	trace_android_vh_rwsem_up_write_end(sem);
 }
 
 /*
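All five rwsem hooks follow the same vendor-hook pattern: on a GKI kernel they compile to no-op tracepoints until a probe attaches. A sketch of what the declaration side looks like, with prototypes inferred from the call sites above (the authoritative declarations live in the Android tree's include/trace/hooks/ headers, so treat the exact macro and header as assumptions):

#include <trace/hooks/vendor_hooks.h>

struct rw_semaphore;
struct rwsem_waiter;

/* One DECLARE_HOOK per attachment point; args mirror the call site. */
DECLARE_HOOK(android_vh_rwsem_up_write_end,
	TP_PROTO(struct rw_semaphore *sem),
	TP_ARGS(sem));

DECLARE_HOOK(android_vh_rwsem_mark_wake_readers,
	TP_PROTO(struct rw_semaphore *sem, struct rwsem_waiter *waiter),
	TP_ARGS(sem, waiter));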
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1420,6 +1420,7 @@ int group_send_sig_info(int sig, struct kernel_siginfo *info,
 			bool reap = false;
 
 			trace_android_vh_process_killed(current, &reap);
+			trace_android_vh_killed_process(current, p, &reap);
 			if (reap)
 				add_to_oom_reaper(p);
 		}
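Here reap acts as an out-parameter: if any attached probe sets it, the SIGKILLed task is queued to the OOM reaper so its memory is reclaimed without waiting for the victim to schedule and exit. A hedged sketch of a probe for the newly added hook, with the signature inferred from the call site and a purely illustrative policy:

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sizes.h>

static void probe_killed_process(void *unused, struct task_struct *killer,
				 struct task_struct *victim, bool *reap)
{
	/* Illustrative policy: reap eagerly when the victim's RSS is large
	 * (get_mm_rss() and SZ_512M >> PAGE_SHIFT are both in pages). */
	if (victim->mm && get_mm_rss(victim->mm) > (SZ_512M >> PAGE_SHIFT))
		*reap = true;
}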
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -2052,6 +2052,32 @@ unsigned long msleep_interruptible(unsigned int msecs)
 
 EXPORT_SYMBOL(msleep_interruptible);
 
+/**
+ * usleep_range_state - Sleep for an approximate time in a given state
+ * @min: Minimum time in usecs to sleep
+ * @max: Maximum time in usecs to sleep
+ * @state: State of the current task that will be while sleeping
+ *
+ * In non-atomic context where the exact wakeup time is flexible, use
+ * usleep_range_state() instead of udelay(). The sleep improves responsiveness
+ * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
+ * power usage by allowing hrtimers to take advantage of an already-
+ * scheduled interrupt instead of scheduling a new one just for this sleep.
+ */
+void __sched usleep_range_state(unsigned long min, unsigned long max,
+				unsigned int state)
+{
+	ktime_t exp = ktime_add_us(ktime_get(), min);
+	u64 delta = (u64)(max - min) * NSEC_PER_USEC;
+
+	for (;;) {
+		__set_current_state(state);
+		/* Do not return before the requested sleep time has elapsed */
+		if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
+			break;
+	}
+}
+
 /**
  * usleep_range - Sleep for an approximate time
  * @min: Minimum time in usecs to sleep
@@ -2065,14 +2091,6 @@ EXPORT_SYMBOL(msleep_interruptible);
  */
 void __sched usleep_range(unsigned long min, unsigned long max)
 {
-	ktime_t exp = ktime_add_us(ktime_get(), min);
-	u64 delta = (u64)(max - min) * NSEC_PER_USEC;
-
-	for (;;) {
-		__set_current_state(TASK_UNINTERRUPTIBLE);
-		/* Do not return before the requested sleep time has elapsed */
-		if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
-			break;
-	}
+	usleep_range_state(min, max, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(usleep_range);
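The second hunk turns usleep_range() into a thin wrapper, making the sleep state a parameter; upstream uses the same helper to offer an idle variant (TASK_IDLE) whose sleeper does not inflate the load average. A sketch of a driver-style poll loop using the state argument (wait_for_device_ready() and its callback are hypothetical, and this assumes usleep_range_state() is visible to the caller via linux/delay.h):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/types.h>

/* Poll every ~10ms; TASK_IDLE keeps this uninterruptible wait
 * from counting toward the load average. */
static int wait_for_device_ready(bool (*ready)(void))
{
	int tries = 100;

	while (!ready() && tries--)
		usleep_range_state(10000, 11000, TASK_IDLE);

	return ready() ? 0 : -ETIMEDOUT;
}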