Merge branch 'linus' into perfcounters/core
Conflicts:
	fs/exec.c
	include/linux/init_task.h

Simple context conflicts.
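As an aside (purely illustrative, not the actual conflict hunks): a "simple context conflict" is one where both branches changed adjacent lines in the same region, so git cannot merge them automatically and the resolution simply keeps both sides, e.g.:

	<<<<<<< HEAD
		/* line added on the perfcounters/core side */
	=======
		/* line added on the 'linus' side */
	>>>>>>> linus

resolved by deleting the markers and keeping both lines in order.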
diff --git a/include/linux/sched.h b/include/linux/sched.h
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -97,6 +97,7 @@ struct exec_domain;
 struct futex_pi_state;
 struct robust_list_head;
 struct bio;
+struct bts_tracer;
 
 /*
  * List of flags we want to share for kernel threads,
@@ -260,8 +261,6 @@ static inline int select_nohz_load_balancer(int cpu)
 }
 #endif
 
-extern unsigned long rt_needs_cpu(int cpu);
-
 /*
  * Only dump TASK_* tasks. (0 for all tasks)
  */
@@ -573,12 +572,6 @@ struct signal_struct {
 	 */
 	struct rlimit rlim[RLIM_NLIMITS];
 
-	/* keep the process-shared keyrings here so that they do the right
-	 * thing in threads created with CLONE_THREAD */
-#ifdef CONFIG_KEYS
-	struct key *session_keyring;	/* keyring inherited over fork */
-	struct key *process_keyring;	/* keyring private to this process */
-#endif
 #ifdef CONFIG_BSD_PROCESS_ACCT
 	struct pacct_struct pacct;	/* per-process accounting information */
 #endif
@@ -649,6 +642,7 @@ struct user_struct {
 	/* Hash table maintenance information */
 	struct hlist_node uidhash_node;
 	uid_t uid;
+	struct user_namespace *user_ns;
 
 #ifdef CONFIG_USER_SCHED
 	struct task_group *tg;
@@ -666,6 +660,7 @@ extern struct user_struct *find_user(uid_t);
 extern struct user_struct root_user;
 #define INIT_USER (&root_user)
 
+
 struct backing_dev_info;
 struct reclaim_state;
 
@@ -673,8 +668,7 @@ struct reclaim_state;
 struct sched_info {
 	/* cumulative counters */
 	unsigned long pcount;	      /* # of times run on this cpu */
-	unsigned long long cpu_time,  /* time spent on the cpu */
-			   run_delay; /* time spent waiting on a runqueue */
+	unsigned long long run_delay; /* time spent waiting on a runqueue */
 
 	/* timestamps */
 	unsigned long long last_arrival,/* when we last ran on a cpu */
@@ -889,38 +883,7 @@ partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
 #endif	/* !CONFIG_SMP */
 
 struct io_context;	/* See blkdev.h */
-#define NGROUPS_SMALL		32
-#define NGROUPS_PER_BLOCK	((unsigned int)(PAGE_SIZE / sizeof(gid_t)))
-struct group_info {
-	int ngroups;
-	atomic_t usage;
-	gid_t small_block[NGROUPS_SMALL];
-	int nblocks;
-	gid_t *blocks[0];
-};
-
-/*
- * get_group_info() must be called with the owning task locked (via task_lock())
- * when task != current.  The reason being that the vast majority of callers are
- * looking at current->group_info, which can not be changed except by the
- * current task.  Changing current->group_info requires the task lock, too.
- */
-#define get_group_info(group_info) do { \
-	atomic_inc(&(group_info)->usage); \
-} while (0)
-
-#define put_group_info(group_info) do { \
-	if (atomic_dec_and_test(&(group_info)->usage)) \
-		groups_free(group_info); \
-} while (0)
-
-extern struct group_info *groups_alloc(int gidsetsize);
-extern void groups_free(struct group_info *group_info);
-extern int set_current_groups(struct group_info *group_info);
-extern int groups_search(struct group_info *group_info, gid_t grp);
-/* access the groups "array" with this macro */
-#define GROUP_AT(gi, i) \
-	((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
+
 
 #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
 extern void prefetch_stack(struct task_struct *t);
@@ -1167,6 +1130,19 @@ struct task_struct {
 	struct list_head ptraced;
 	struct list_head ptrace_entry;
 
+#ifdef CONFIG_X86_PTRACE_BTS
+	/*
+	 * This is the tracer handle for the ptrace BTS extension.
+	 * This field actually belongs to the ptracer task.
+	 */
+	struct bts_tracer *bts;
+	/*
+	 * The buffer to hold the BTS data.
+	 */
+	void *bts_buffer;
+	size_t bts_size;
+#endif /* CONFIG_X86_PTRACE_BTS */
+
 	/* PID/PID hash table linkage. */
 	struct pid_link pids[PIDTYPE_MAX];
 	struct list_head thread_group;
@@ -1188,17 +1164,12 @@ struct task_struct {
 	struct list_head cpu_timers[3];
 
 /* process credentials */
-	uid_t uid,euid,suid,fsuid;
-	gid_t gid,egid,sgid,fsgid;
-	struct group_info *group_info;
-	kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset;
-	struct user_struct *user;
-	unsigned securebits;
-#ifdef CONFIG_KEYS
-	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
-	struct key *request_key_auth;	/* assumed request_key authority */
-	struct key *thread_keyring;	/* keyring private to this thread */
-#endif
+	const struct cred *real_cred;	/* objective and real subjective task
+					 * credentials (COW) */
+	const struct cred *cred;	/* effective (overridable) subjective task
+					 * credentials (COW) */
+	struct mutex cred_exec_mutex;	/* execve vs ptrace cred calculation mutex */
+
 	char comm[TASK_COMM_LEN]; /* executable name excluding path
 				     - access with [gs]et_task_comm (which lock
 				     it with task_lock())
@@ -1235,9 +1206,6 @@ struct task_struct {
 	int (*notifier)(void *priv);
 	void *notifier_data;
 	sigset_t *notifier_mask;
-#ifdef CONFIG_SECURITY
-	void *security;
-#endif
 	struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
 	uid_t loginuid;
@@ -1359,6 +1327,23 @@ struct task_struct {
 	unsigned long default_timer_slack_ns;
 
 	struct list_head *scm_work_list;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	/* Index of current stored adress in ret_stack */
+	int curr_ret_stack;
+	/* Stack of return addresses for return function tracing */
+	struct ftrace_ret_stack *ret_stack;
+	/*
+	 * Number of functions that haven't been traced
+	 * because of depth overrun.
+	 */
+	atomic_t trace_overrun;
+	/* Pause for the tracing */
+	atomic_t tracing_graph_pause;
+#endif
+#ifdef CONFIG_TRACING
+	/* state flags for use by tracers */
+	unsigned long trace;
+#endif
 };
 
 /*
@@ -1778,7 +1763,6 @@ static inline struct user_struct *get_uid(struct user_struct *u)
 	return u;
 }
 extern void free_uid(struct user_struct *);
-extern void switch_uid(struct user_struct *);
 extern void release_uids(struct user_namespace *ns);
 
 #include <asm/current.h>
@@ -1797,9 +1781,6 @@ extern void wake_up_new_task(struct task_struct *tsk,
 extern void sched_fork(struct task_struct *p, int clone_flags);
 extern void sched_dead(struct task_struct *p);
 
-extern int in_group_p(gid_t);
-extern int in_egroup_p(gid_t);
-
 extern void proc_caches_init(void);
 extern void flush_signals(struct task_struct *);
 extern void ignore_signals(struct task_struct *);
@@ -1931,6 +1912,8 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
 #define for_each_process(p) \
 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
 
+extern bool is_single_threaded(struct task_struct *);
+
 /*
  * Careful: do_each_thread/while_each_thread is a double loop so
  * 'break' will not work as expected - use goto instead.
@@ -2227,6 +2210,7 @@ extern void normalize_rt_tasks(void);
 extern struct task_group init_task_group;
 #ifdef CONFIG_USER_SCHED
 extern struct task_group root_task_group;
+extern void set_tg_uid(struct user_struct *user);
 #endif
 
 extern struct task_group *sched_create_group(struct task_group *parent);
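The group_info block removed above was moved to <linux/cred.h> by the same rework; the interfaces keep the signatures shown in the removed lines (groups_alloc(), set_current_groups(), GROUP_AT(), put_group_info()). A sketch of their typical use, assuming they are now reached via <linux/cred.h>:

	#include <linux/cred.h>
	#include <linux/errno.h>

	/* Illustrative: give the current task exactly two supplementary groups. */
	static int set_two_groups(gid_t g0, gid_t g1)
	{
		struct group_info *gi;
		int err;

		gi = groups_alloc(2);		/* room for two gids */
		if (!gi)
			return -ENOMEM;

		GROUP_AT(gi, 0) = g0;		/* indexes blocks[] across block boundaries */
		GROUP_AT(gi, 1) = g1;

		err = set_current_groups(gi);	/* installs its own reference */
		put_group_info(gi);		/* drop ours either way */
		return err;
	}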