Merge Linus's 5.4-rc1-prerelease branch into android-mainline
This merges Linus's tree as of commit b41dae061b ("Merge tag
'xfs-5.4-merge-7' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux")
into android-mainline.
This "early" merge makes it easier to test and handle merge conflicts
instead of having to wait until the "end" of the merge window and handle
all 10000+ commits at once.
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I6bebf55e5e2353f814e3c87f5033607b1ae5d812
@@ -25,9 +25,11 @@
 #include <linux/resource.h>
 #include <linux/latencytop.h>
 #include <linux/sched/prio.h>
+#include <linux/sched/types.h>
 #include <linux/signal_types.h>
 #include <linux/mm_types_task.h>
 #include <linux/task_io_accounting.h>
+#include <linux/posix-timers.h>
 #include <linux/rseq.h>
 
 /* task_struct member predeclarations (sorted alphabetically): */
@@ -244,27 +246,6 @@ struct prev_cputime {
 #endif
 };
 
-/**
- * struct task_cputime - collected CPU time counts
- * @utime:		time spent in user mode, in nanoseconds
- * @stime:		time spent in kernel mode, in nanoseconds
- * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
- *
- * This structure groups together three kinds of CPU time that are tracked for
- * threads and thread groups. Most things considering CPU time want to group
- * these counts together and treat all three of them in parallel.
- */
-struct task_cputime {
-	u64				utime;
-	u64				stime;
-	unsigned long long		sum_exec_runtime;
-};
-
-/* Alternate field names when used on cache expirations: */
-#define virt_exp			utime
-#define prof_exp			stime
-#define sched_exp			sum_exec_runtime
-
 enum vtime_state {
 	/* Task is sleeping or running in a CPU with VTIME inactive: */
 	VTIME_INACTIVE = 0,
@@ -295,6 +276,11 @@ enum uclamp_id {
 	UCLAMP_CNT
 };
 
+#ifdef CONFIG_SMP
+extern struct root_domain def_root_domain;
+extern struct mutex sched_domains_mutex;
+#endif
+
 struct sched_info {
 #ifdef CONFIG_SCHED_INFO
 	/* Cumulative counters: */
@@ -880,10 +866,8 @@ struct task_struct {
 	unsigned long			min_flt;
 	unsigned long			maj_flt;
 
-#ifdef CONFIG_POSIX_TIMERS
-	struct task_cputime		cputime_expires;
-	struct list_head		cpu_timers[3];
-#endif
+	/* Empty if CONFIG_POSIX_CPUTIMERS=n */
+	struct posix_cputimers		posix_cputimers;
 
 	/* Process credentials: */
 
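The task_struct hunk above drops the CONFIG_POSIX_TIMERS-guarded expiry fields
in favour of a single posix_cputimers container, and the "Empty if
CONFIG_POSIX_CPUTIMERS=n" comment refers to the usual kernel pattern of
compiling a struct down to nothing when the feature is disabled. Below is a
minimal sketch of that pattern only; the struct name and member are made up
for illustration and are not the real posix_cputimers layout.

/* Illustrative only: "example_cputimers" is a stand-in, not the 5.4 type. */
#ifdef CONFIG_POSIX_CPUTIMERS
struct example_cputimers {
	u64	expiry_cache[3];	/* one cached expiry per clock */
};
#else
struct example_cputimers { };		/* empty: adds no bytes to the task */
#endif

struct example_task {
	unsigned long			min_flt;
	unsigned long			maj_flt;
	/* Always present; costs nothing when CONFIG_POSIX_CPUTIMERS=n */
	struct example_cputimers	cputimers;
};
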
@@ -1771,7 +1755,7 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
  * value indicates whether a reschedule was done in fact.
  * cond_resched_lock() will drop the spinlock before scheduling,
  */
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
 extern int _cond_resched(void);
 #else
 static inline int _cond_resched(void) { return 0; }
@@ -1800,12 +1784,12 @@ static inline void cond_resched_rcu(void)
 
 /*
  * Does a critical section need to be broken due to another
- * task waiting?: (technically does not depend on CONFIG_PREEMPT,
+ * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
  * but a general need for low latency)
  */
 static inline int spin_needbreak(spinlock_t *lock)
 {
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 	return spin_is_contended(lock);
 #else
 	return 0;
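The last two hunks pick up the 5.4 guard rename from CONFIG_PREEMPT to
CONFIG_PREEMPTION, the generic "kernel is preemptible" switch intended to
also cover preempt-RT style configurations. As a minimal sketch, a
hypothetical out-of-tree helper that mirrored spin_needbreak() would look
like this after the rename (the helper name is invented for illustration):

#include <linux/spinlock.h>

/*
 * Hypothetical helper shown only to illustrate the rename: the preemptible
 * case is now keyed off CONFIG_PREEMPTION rather than CONFIG_PREEMPT.
 */
static inline int example_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPTION		/* was: #ifdef CONFIG_PREEMPT */
	return spin_is_contended(lock);
#else
	return 0;
#endif
}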