Merge branch 'linus' into irq/threaded
Conflicts:
	include/linux/irq.h
	kernel/irq/handle.c
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -68,7 +68,7 @@ struct sched_param {
 #include <linux/smp.h>
 #include <linux/sem.h>
 #include <linux/signal.h>
-#include <linux/fs_struct.h>
+#include <linux/path.h>
 #include <linux/compiler.h>
 #include <linux/completion.h>
 #include <linux/pid.h>
@@ -97,6 +97,7 @@ struct futex_pi_state;
 struct robust_list_head;
 struct bio;
 struct bts_tracer;
+struct fs_struct;
 
 /*
  * List of flags we want to share for kernel threads,
@@ -137,6 +138,8 @@ extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
 
+extern unsigned long get_parent_ip(unsigned long addr);
+
 struct seq_file;
 struct cfs_rq;
 struct task_group;
@@ -331,7 +334,9 @@ extern signed long schedule_timeout(signed long timeout);
 extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
+asmlinkage void __schedule(void);
 asmlinkage void schedule(void);
+extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
 
 struct nsproxy;
 struct user_namespace;
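mutex_spin_on_owner() is the entry point for adaptive mutex spinning: a waiter keeps busy-waiting only while the lock owner is actively running on another CPU, and falls back to sleeping otherwise. A minimal user-space sketch of that policy, with made-up toy_* types standing in for struct mutex and the owner's thread_info:

#include <stdatomic.h>
#include <stdbool.h>

struct toy_mutex {
	atomic_int  locked;        /* 0 = free, 1 = held */
	atomic_bool owner_on_cpu;  /* maintained by the current owner */
};

/* Returns true if the lock became free while we spun (worth retrying
 * the acquire fast path), false if the owner went to sleep (the caller
 * should block instead of burning cycles). */
static bool toy_spin_on_owner(struct toy_mutex *m)
{
	while (atomic_load(&m->locked)) {
		if (!atomic_load(&m->owner_on_cpu))
			return false;
	}
	return true;
}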
@@ -389,8 +394,15 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 		(mm)->hiwater_vm = (mm)->total_vm;	\
 } while (0)
 
-#define get_mm_hiwater_rss(mm)	max((mm)->hiwater_rss, get_mm_rss(mm))
-#define get_mm_hiwater_vm(mm)	max((mm)->hiwater_vm, (mm)->total_vm)
+static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
+{
+	return max(mm->hiwater_rss, get_mm_rss(mm));
+}
+
+static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
+{
+	return max(mm->hiwater_vm, mm->total_vm);
+}
 
 extern void set_dumpable(struct mm_struct *mm, int value);
 extern int get_dumpable(struct mm_struct *mm);
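Converting the get_mm_hiwater_*() macros into static inlines keeps the same max() semantics while gaining real type checking of the mm argument and a proper function scope. A stand-alone illustration of the difference; struct mm_counters and HIWATER_MAX are invented for this example, not kernel definitions:

#include <stdio.h>

/* Naive macro: no type checking, arguments re-expanded textually,
 * so side-effecting arguments would be evaluated twice. */
#define HIWATER_MAX(a, b) ((a) > (b) ? (a) : (b))

struct mm_counters {
	unsigned long hiwater_rss;
	unsigned long rss;
};

/* Inline version: 'mm' must really be a struct mm_counters *,
 * and each operand is evaluated exactly once. */
static inline unsigned long get_hiwater_rss(struct mm_counters *mm)
{
	return mm->hiwater_rss > mm->rss ? mm->hiwater_rss : mm->rss;
}

int main(void)
{
	struct mm_counters mm = { .hiwater_rss = 640, .rss = 512 };

	printf("macro:  %lu\n", HIWATER_MAX(mm.hiwater_rss, mm.rss));
	printf("inline: %lu\n", get_hiwater_rss(&mm));
	return 0;
}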
@@ -538,25 +550,8 @@ struct signal_struct {
 	struct list_head cpu_timers[3];
 
 	/* job control IDs */
 
-	/*
-	 * pgrp and session fields are deprecated.
-	 * use the task_session_Xnr and task_pgrp_Xnr routines below
-	 */
-
-	union {
-		pid_t pgrp __deprecated;
-		pid_t __pgrp;
-	};
-
 	struct pid *tty_old_pgrp;
-
-	union {
-		pid_t session __deprecated;
-		pid_t __session;
-	};
-
 	/* boolean value for session group leader */
 	int leader;
@@ -998,6 +993,7 @@ struct sched_class {
 			struct rq *busiest, struct sched_domain *sd,
 			enum cpu_idle_type idle);
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
+	int  (*needs_post_schedule) (struct rq *this_rq);
 	void (*post_schedule) (struct rq *this_rq);
 	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
 
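The new needs_post_schedule() hook lets the core scheduler ask a scheduling class whether its comparatively expensive post_schedule() work is needed at all before calling it. A toy model of that query-then-act split; struct toy_rq and the push logic are stand-ins, not the RT class's real implementation:

#include <stdbool.h>
#include <stdio.h>

struct toy_rq {
	int nr_pushable;	/* tasks that could run elsewhere */
};

static bool toy_needs_post_schedule(struct toy_rq *rq)
{
	return rq->nr_pushable > 0;	/* cheap check on the hot path */
}

static void toy_post_schedule(struct toy_rq *rq)
{
	while (rq->nr_pushable > 0) {	/* expensive balancing work */
		printf("pushing one overloaded task away\n");
		rq->nr_pushable--;
	}
}

int main(void)
{
	struct toy_rq rq = { .nr_pushable = 2 };

	/* Core-scheduler side: query first, call only when needed. */
	if (toy_needs_post_schedule(&rq))
		toy_post_schedule(&rq);
	return 0;
}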
@@ -1052,6 +1048,10 @@ struct sched_entity {
 	u64			last_wakeup;
 	u64			avg_overlap;
 
+	u64			start_runtime;
+	u64			avg_wakeup;
+	u64			nr_migrations;
+
 #ifdef CONFIG_SCHEDSTATS
 	u64			wait_start;
 	u64			wait_max;
@@ -1067,7 +1067,6 @@ struct sched_entity {
 	u64			exec_max;
 	u64			slice_max;
 
-	u64			nr_migrations;
 	u64			nr_migrations_cold;
 	u64			nr_failed_migrations_affine;
 	u64			nr_failed_migrations_running;
@@ -1164,6 +1163,7 @@ struct task_struct {
 #endif
 
 	struct list_head tasks;
+	struct plist_node pushable_tasks;
 
 	struct mm_struct *mm, *active_mm;
 
@@ -1175,6 +1175,8 @@ struct task_struct {
 	/* ??? */
 	unsigned int personality;
 	unsigned did_exec:1;
+	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
+				 * execve */
 	pid_t pid;
 	pid_t tgid;
 
@@ -1332,6 +1334,7 @@ struct task_struct {
 	int lockdep_depth;
 	unsigned int lockdep_recursion;
 	struct held_lock held_locks[MAX_LOCK_DEPTH];
+	gfp_t lockdep_reclaim_gfp;
 #endif
 
 	/* journalling filesystem info */
@@ -1409,6 +1412,8 @@ struct task_struct {
 	int curr_ret_stack;
 	/* Stack of return addresses for return function tracing */
 	struct ftrace_ret_stack	*ret_stack;
+	/* time stamp for last schedule */
+	unsigned long long ftrace_timestamp;
 	/*
 	 * Number of functions that haven't been traced
 	 * because of depth overrun.
@@ -1457,16 +1462,6 @@ static inline int rt_task(struct task_struct *p)
 	return rt_prio(p->prio);
 }
 
-static inline void set_task_session(struct task_struct *tsk, pid_t session)
-{
-	tsk->signal->__session = session;
-}
-
-static inline void set_task_pgrp(struct task_struct *tsk, pid_t pgrp)
-{
-	tsk->signal->__pgrp = pgrp;
-}
-
 static inline struct pid *task_pid(struct task_struct *task)
 {
 	return task->pids[PIDTYPE_PID].pid;
@@ -1477,6 +1472,11 @@ static inline struct pid *task_tgid(struct task_struct *task)
 	return task->group_leader->pids[PIDTYPE_PID].pid;
 }
 
+/*
+ * Without tasklist or rcu lock it is not safe to dereference
+ * the result of task_pgrp/task_session even if task == current,
+ * we can race with another thread doing sys_setsid/sys_setpgid.
+ */
 static inline struct pid *task_pgrp(struct task_struct *task)
 {
 	return task->group_leader->pids[PIDTYPE_PGID].pid;
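The added comment is the substance of this hunk: the struct pid returned by task_pgrp()/task_session() can be swapped out concurrently by sys_setsid()/sys_setpgid(), so callers must hold tasklist_lock or an RCU read lock across both the lookup and any dereference. A typical safe pattern, sketched from the rule the comment states (pid_nr() is the real accessor from include/linux/pid.h; the surrounding code is hypothetical):

	pid_t pgrp;

	rcu_read_lock();
	pgrp = pid_nr(task_pgrp(tsk));	/* pid can't be freed while we read it */
	rcu_read_unlock();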
@@ -1502,17 +1502,23 @@ struct pid_namespace;
  *
  * see also pid_nr() etc in include/linux/pid.h
  */
+pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
+			struct pid_namespace *ns);
 
 static inline pid_t task_pid_nr(struct task_struct *tsk)
 {
 	return tsk->pid;
 }
 
-pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
+static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
+					struct pid_namespace *ns)
+{
+	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
+}
 
 static inline pid_t task_pid_vnr(struct task_struct *tsk)
 {
-	return pid_vnr(task_pid(tsk));
+	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
 }
 
 
@@ -1529,31 +1535,34 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk)
 }
 
 
-static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
+					struct pid_namespace *ns)
 {
-	return tsk->signal->__pgrp;
+	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
 }
 
-pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
-
 static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
 {
-	return pid_vnr(task_pgrp(tsk));
+	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
 }
 
 
-static inline pid_t task_session_nr(struct task_struct *tsk)
+static inline pid_t task_session_nr_ns(struct task_struct *tsk,
+					struct pid_namespace *ns)
 {
-	return tsk->signal->__session;
+	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
 }
 
-pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
-
 static inline pid_t task_session_vnr(struct task_struct *tsk)
 {
-	return pid_vnr(task_session(tsk));
+	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
 }
 
+/* obsolete, do not use */
+static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+{
+	return task_pgrp_nr_ns(tsk, &init_pid_ns);
+}
+
 /**
  * pid_alive - check that a task structure is not stale
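Every helper in this block now funnels into the single out-of-line worker __task_pid_nr_ns(), keyed by enum pid_type, with ns == NULL meaning "as seen from the caller's own namespace" (the _vnr flavours). A compressed user-space model of that shape; all toy_* names are hypothetical:

#include <stdio.h>
#include <sys/types.h>

enum toy_pid_type { TOY_PID, TOY_PGID, TOY_SID };

struct toy_task {
	pid_t ids[3];	/* one number per ID type */
};

/* One worker owns the lookup; NULL ns plays the role of "current's
 * namespace". Here both cases are stubbed to the same table. */
static pid_t toy_pid_nr_ns(struct toy_task *t, enum toy_pid_type type,
			   void *ns)
{
	(void)ns;
	return t->ids[type];
}

/* The per-type helpers shrink to one-line wrappers. */
static pid_t toy_task_pgrp_vnr(struct toy_task *t)
{
	return toy_pid_nr_ns(t, TOY_PGID, NULL);
}

int main(void)
{
	struct toy_task t = { .ids = { 1234, 1200, 1100 } };

	printf("pgrp: %d\n", (int)toy_task_pgrp_vnr(&t));
	return 0;
}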
@@ -1677,6 +1686,16 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 	return set_cpus_allowed_ptr(p, &new_mask);
 }
 
+/*
+ * Architectures can set this to 1 if they have specified
+ * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
+ * but then during bootup it turns out that sched_clock()
+ * is reliable after all:
+ */
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+extern int sched_clock_stable;
+#endif
+
 extern unsigned long long sched_clock(void);
 
 extern void sched_clock_init(void);
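sched_clock_stable gives architectures that opted into CONFIG_HAVE_UNSTABLE_SCHED_CLOCK a way to upgrade their verdict at boot once the clock proves trustworthy. A sketch of the intended arch-side use; cpu_has_invariant_tsc() and the init function are hypothetical names, not the real arch code:

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
extern int sched_clock_stable;
#endif

static void __init toy_arch_time_init(void)
{
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	/* Hypothetical probe: if the TSC turns out to be invariant,
	 * tell the scheduler clock it can trust sched_clock(). */
	if (cpu_has_invariant_tsc())
		sched_clock_stable = 1;
#endif
}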
@@ -1953,7 +1972,8 @@ extern void mm_release(struct task_struct *, struct mm_struct *);
 /* Allocate a new mm structure and copy contents from tsk->mm */
 extern struct mm_struct *dup_mm(struct task_struct *tsk);
 
-extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
+extern int copy_thread(unsigned long, unsigned long, unsigned long,
+			struct task_struct *, struct pt_regs *);
 extern void flush_thread(void);
 extern void exit_thread(void);
 
@@ -2038,6 +2058,11 @@ static inline int thread_group_empty(struct task_struct *p)
 #define delay_group_leader(p) \
 		(thread_group_leader(p) && !thread_group_empty(p))
 
+static inline int task_detached(struct task_struct *p)
+{
+	return p->exit_signal == -1;
+}
+
 /*
  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
  * subscriptions and synchronises with wait4(). Also used in procfs. Also
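task_detached() simply names the open-coded p->exit_signal == -1 test: a detached task is not supposed to notify a parent when it dies. Exit-path callers can then read as, for instance (a hedged sketch; the logic is simplified relative to what kernel/exit.c actually does):

	/* Reaping side, simplified: a detached child needs no wait()er. */
	if (task_detached(p))
		release_task(p);
	else
		do_notify_parent(p, p->exit_signal);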