Merge branch 'linus' into stackprotector

Conflicts:
	arch/x86/kernel/Makefile
	include/asm-x86/pda.h
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -87,6 +87,7 @@ struct sched_param {
 #include <linux/task_io_accounting.h>
 #include <linux/kobject.h>
 #include <linux/latencytop.h>
+#include <linux/cred.h>
 
 #include <asm/processor.h>
 
@@ -134,7 +135,6 @@ extern unsigned long nr_running(void);
 extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
-extern unsigned long weighted_cpuload(const int cpu);
 
 struct seq_file;
 struct cfs_rq;
@@ -246,6 +246,8 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
 extern void init_idle(struct task_struct *idle, int cpu);
 extern void init_idle_bootup_task(struct task_struct *idle);
 
+extern int runqueue_is_locked(void);
+
 extern cpumask_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern int select_nohz_load_balancer(int cpu);
@@ -291,13 +293,13 @@ extern void sched_show_task(struct task_struct *p);
 
 #ifdef CONFIG_DETECT_SOFTLOCKUP
 extern void softlockup_tick(void);
-extern void spawn_softlockup_task(void);
 extern void touch_softlockup_watchdog(void);
 extern void touch_all_softlockup_watchdogs(void);
-extern unsigned long softlockup_thresh;
+extern unsigned int softlockup_panic;
 extern unsigned long sysctl_hung_task_check_count;
 extern unsigned long sysctl_hung_task_timeout_secs;
 extern unsigned long sysctl_hung_task_warnings;
+extern int softlockup_thresh;
 #else
 static inline void softlockup_tick(void)
 {
@@ -350,7 +352,7 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 extern void arch_unmap_area(struct mm_struct *, unsigned long);
 extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#if USE_SPLIT_PTLOCKS
 /*
  * The mm counters are not protected by its page_table_lock,
  * so must be incremented atomically.
@@ -361,7 +363,7 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 #define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
 #define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
 
-#else  /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#else  /* !USE_SPLIT_PTLOCKS */
 /*
  * The mm counters are protected by its page_table_lock,
  * so can be incremented directly.
@@ -372,7 +374,7 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 #define inc_mm_counter(mm, member) (mm)->_##member++
 #define dec_mm_counter(mm, member) (mm)->_##member--
 
-#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#endif /* !USE_SPLIT_PTLOCKS */
 
 #define get_mm_rss(mm)					\
 	(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
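Note on the renamed guard above: USE_SPLIT_PTLOCKS now selects between the same two mm-counter flavours that the NR_CPUS/CONFIG_SPLIT_PTLOCK_CPUS expression used to pick. A minimal userspace model of the macro pair (the `struct mm_counters` type and `get_mm_counter` here are illustrative stand-ins; the kernel uses `atomic_long_t` fields inside `mm_struct`):

```c
#include <stdatomic.h>
#include <stdio.h>

#define USE_SPLIT_PTLOCKS 1		/* pretend we're on a large SMP box */

#if USE_SPLIT_PTLOCKS
/* updates may race without the page_table_lock: use atomics */
struct mm_counters { atomic_long _file_rss, _anon_rss; };
#define inc_mm_counter(mm, member) atomic_fetch_add(&(mm)->_##member, 1)
#define get_mm_counter(mm, member) atomic_load(&(mm)->_##member)
#else
/* every update happens under the page_table_lock: plain longs suffice */
struct mm_counters { long _file_rss, _anon_rss; };
#define inc_mm_counter(mm, member) ((mm)->_##member++)
#define get_mm_counter(mm, member) ((mm)->_##member)
#endif

#define get_mm_rss(mm) \
	(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))

int main(void)
{
	struct mm_counters mm = { 0 };

	/* token pasting turns "file_rss" into the _file_rss field */
	inc_mm_counter(&mm, file_rss);
	inc_mm_counter(&mm, anon_rss);
	printf("rss pages: %ld\n", (long)get_mm_rss(&mm));
	return 0;
}
```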
@@ -449,8 +451,8 @@ struct signal_struct {
 	 * - everyone except group_exit_task is stopped during signal delivery
 	 *   of fatal signals, group_exit_task processes the signal.
 	 */
-	struct task_struct	*group_exit_task;
 	int			notify_count;
+	struct task_struct	*group_exit_task;
 
 	/* thread group stop support, overloads group_exit_code too */
 	int			group_stop_count;
@@ -504,6 +506,7 @@ struct signal_struct {
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
 	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
 	unsigned long inblock, oublock, cinblock, coublock;
+	struct task_io_accounting ioac;
 
 	/*
 	 * Cumulative ns of scheduled CPU time for dead threads in the
@@ -666,6 +669,10 @@ struct task_delay_info {
 				/* io operations performed */
 	u32 swapin_count;	/* total count of the number of swapin block */
 				/* io operations performed */
+
+	struct timespec freepages_start, freepages_end;
+	u64 freepages_delay;	/* wait for memory reclaim */
+	u32 freepages_count;	/* total count of memory reclaim */
 };
 #endif	/* CONFIG_TASK_DELAY_ACCT */
 
@@ -784,6 +791,8 @@ struct sched_domain {
 	unsigned int balance_interval;	/* initialise to 1. units in ms. */
 	unsigned int nr_balance_failed; /* initialise to 0 */
 
+	u64 last_update;
+
 #ifdef CONFIG_SCHEDSTATS
 	/* load_balance() stats */
 	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
@@ -815,30 +824,25 @@ struct sched_domain {
 	unsigned int ttwu_move_affine;
 	unsigned int ttwu_move_balance;
 #endif
+#ifdef CONFIG_SCHED_DEBUG
+	char *name;
+#endif
 };
 
 extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
 				    struct sched_domain_attr *dattr_new);
 extern int arch_reinit_sched_domains(void);
 
-#endif	/* CONFIG_SMP */
+#else /* CONFIG_SMP */
 
-/*
- * A runqueue laden with a single nice 0 task scores a weighted_cpuload of
- * SCHED_LOAD_SCALE. This function returns 1 if any cpu is laden with a
- * task of nice 0 or enough lower priority tasks to bring up the
- * weighted_cpuload
- */
-static inline int above_background_load(void)
-{
-	unsigned long cpu;
-
-	for_each_online_cpu(cpu) {
-		if (weighted_cpuload(cpu) >= SCHED_LOAD_SCALE)
-			return 1;
-	}
-	return 0;
-}
+struct sched_domain_attr;
+
+static inline void
+partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+			struct sched_domain_attr *dattr_new)
+{
+}
+#endif	/* !CONFIG_SMP */
 
 struct io_context;			/* See blkdev.h */
 #define NGROUPS_SMALL		32
@@ -896,7 +900,7 @@ struct sched_class {
 	void (*yield_task) (struct rq *rq);
 	int  (*select_task_rq)(struct task_struct *p, int sync);
 
-	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
+	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);
 
 	struct task_struct * (*pick_next_task) (struct rq *rq);
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
@@ -921,8 +925,8 @@ struct sched_class {
 	void (*set_cpus_allowed)(struct task_struct *p,
 				 const cpumask_t *newmask);
 
-	void (*join_domain)(struct rq *rq);
-	void (*leave_domain)(struct rq *rq);
+	void (*rq_online)(struct rq *rq);
+	void (*rq_offline)(struct rq *rq);
 
 	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
 			       int running);
@@ -1009,8 +1013,8 @@ struct sched_entity {
 
 struct sched_rt_entity {
 	struct list_head run_list;
-	unsigned int time_slice;
 	unsigned long timeout;
+	unsigned int time_slice;
 	int nr_cpus_allowed;
 
 	struct sched_rt_entity *back;
@@ -1039,6 +1043,7 @@ struct task_struct {
 #endif
 
 	int prio, static_prio, normal_prio;
+	unsigned int rt_priority;
 	const struct sched_class *sched_class;
 	struct sched_entity se;
 	struct sched_rt_entity rt;
@@ -1075,12 +1080,6 @@ struct task_struct {
 #endif
 
 	struct list_head tasks;
-	/*
-	 * ptrace_list/ptrace_children forms the list of my children
-	 * that were stolen by a ptracer.
-	 */
-	struct list_head ptrace_children;
-	struct list_head ptrace_list;
 
 	struct mm_struct *mm, *active_mm;
 
@@ -1101,18 +1100,25 @@ struct task_struct {
 	/*
 	 * pointers to (original) parent process, youngest child, younger sibling,
 	 * older sibling, respectively.  (p->father can be replaced with
-	 * p->parent->pid)
+	 * p->real_parent->pid)
 	 */
-	struct task_struct *real_parent; /* real parent process (when being debugged) */
-	struct task_struct *parent;	/* parent process */
+	struct task_struct *real_parent; /* real parent process */
+	struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */
 	/*
-	 * children/sibling forms the list of my children plus the
-	 * tasks I'm ptracing.
+	 * children/sibling forms the list of my natural children
 	 */
 	struct list_head children;	/* list of my children */
 	struct list_head sibling;	/* linkage in my parent's children list */
 	struct task_struct *group_leader;	/* threadgroup leader */
 
+	/*
+	 * ptraced is the list of tasks this task is using ptrace on.
+	 * This includes both natural children and PTRACE_ATTACH targets.
+	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
+	 */
+	struct list_head ptraced;
+	struct list_head ptrace_entry;
+
 	/* PID/PID hash table linkage. */
 	struct pid_link pids[PIDTYPE_MAX];
 	struct list_head thread_group;
@@ -1121,7 +1127,6 @@ struct task_struct {
 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
 
-	unsigned int rt_priority;
 	cputime_t utime, stime, utimescaled, stimescaled;
 	cputime_t gtime;
 	cputime_t prev_utime, prev_stime;
@@ -1140,12 +1145,12 @@ struct task_struct {
 	gid_t gid,egid,sgid,fsgid;
 	struct group_info *group_info;
 	kernel_cap_t   cap_effective, cap_inheritable, cap_permitted, cap_bset;
-	unsigned securebits;
 	struct user_struct *user;
+	unsigned securebits;
 #ifdef CONFIG_KEYS
-	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
 	struct key *request_key_auth;	/* assumed request_key authority */
 	struct key *thread_keyring;	/* keyring private to this thread */
+	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
 #endif
 	char comm[TASK_COMM_LEN]; /* executable name excluding path
 				     - access with [gs]et_task_comm (which lock
@@ -1232,8 +1237,8 @@ struct task_struct {
 # define MAX_LOCK_DEPTH 48UL
 	u64 curr_chain_key;
 	int lockdep_depth;
-	struct held_lock held_locks[MAX_LOCK_DEPTH];
 	unsigned int lockdep_recursion;
+	struct held_lock held_locks[MAX_LOCK_DEPTH];
 #endif
 
 	/* journalling filesystem info */
@@ -1251,19 +1256,11 @@ struct task_struct {
 	unsigned long ptrace_message;
 	siginfo_t *last_siginfo; /* For ptrace use.  */
-#ifdef CONFIG_TASK_XACCT
-/* i/o counters(bytes read/written, #syscalls */
-	u64 rchar, wchar, syscr, syscw;
-#endif
 	struct task_io_accounting ioac;
 #if defined(CONFIG_TASK_XACCT)
 	u64 acct_rss_mem1;	/* accumulated rss usage */
 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
-	cputime_t acct_stimexpd;/* stime since last update */
-#endif
-#ifdef CONFIG_NUMA
-	struct mempolicy *mempolicy;
-	short il_next;
+	cputime_t acct_timexpd;	/* stime + utime since last update */
 #endif
 #ifdef CONFIG_CPUSETS
 	nodemask_t mems_allowed;
@@ -1283,6 +1280,10 @@ struct task_struct {
 #endif
 	struct list_head pi_state_list;
 	struct futex_pi_state *pi_state_cache;
 #endif
+#ifdef CONFIG_NUMA
+	struct mempolicy *mempolicy;
+	short il_next;
+#endif
 	atomic_t fs_excl;	/* holding fs exclusive resources */
 	struct rcu_head rcu;
@@ -1476,6 +1477,10 @@ static inline void put_task_struct(struct task_struct *t)
 		__put_task_struct(t);
 }
 
+extern cputime_t task_utime(struct task_struct *p);
+extern cputime_t task_stime(struct task_struct *p);
+extern cputime_t task_gtime(struct task_struct *p);
+
 /*
  * Per process flags
  */
@@ -1498,14 +1503,16 @@ static inline void put_task_struct(struct task_struct *t)
 #define PF_KSWAPD	0x00040000	/* I am kswapd */
 #define PF_SWAPOFF	0x00080000	/* I am in swapoff */
 #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
-#define PF_BORROWED_MM	0x00200000	/* I am a kthread doing use_mm */
+#define PF_KTHREAD	0x00200000	/* I am a kernel thread */
 #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
 #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
 #define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
 #define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
+#define PF_THREAD_BOUND	0x04000000	/* Thread bound to specific cpu */
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezeable */
+#define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */
 
 /*
  * Only the _current_ task can read/write to tsk->flags, but other
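Note on the flag change above: PF_KTHREAD takes over PF_BORROWED_MM's bit value (0x00200000), so identifying a kernel thread becomes a plain flag test (in-kernel, `p->flags & PF_KTHREAD`). A compilable stand-in that mirrors that test; `struct task` and `is_kernel_thread()` are illustrative, not kernel API:

```c
#include <assert.h>
#include <stdio.h>

#define PF_KTHREAD	0x00200000	/* I am a kernel thread */
#define PF_THREAD_BOUND	0x04000000	/* Thread bound to specific cpu */

struct task { unsigned int flags; };	/* hypothetical stand-in for task_struct */

static int is_kernel_thread(const struct task *t)
{
	return (t->flags & PF_KTHREAD) != 0;
}

int main(void)
{
	/* model of a CPU-bound kernel thread, e.g. a per-cpu worker */
	struct task kthread = { .flags = PF_KTHREAD | PF_THREAD_BOUND };

	assert(is_kernel_thread(&kthread));
	printf("flags = %#x\n", kthread.flags);
	return 0;
}
```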
@@ -1551,16 +1558,10 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 
 extern unsigned long long sched_clock(void);
 
+extern void sched_clock_init(void);
+extern u64 sched_clock_cpu(int cpu);
+
 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-static inline void sched_clock_init(void)
-{
-}
-
-static inline u64 sched_clock_cpu(int cpu)
-{
-	return sched_clock();
-}
-
 static inline void sched_clock_tick(void)
 {
 }
@@ -1573,8 +1574,6 @@ static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
 }
 #else
-extern void sched_clock_init(void);
-extern u64 sched_clock_cpu(int cpu);
 extern void sched_clock_tick(void);
 extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
@@ -1621,6 +1620,7 @@ extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
+extern unsigned int sysctl_sched_shares_ratelimit;
 
 int sched_nr_latency_handler(struct ctl_table *table, int write,
 		struct file *file, void __user *buffer, size_t *length,
@@ -1654,6 +1654,8 @@ extern int can_nice(const struct task_struct *p, const int nice);
 extern int task_curr(const struct task_struct *p);
 extern int idle_cpu(int cpu);
 extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
+extern int sched_setscheduler_nocheck(struct task_struct *, int,
+				      struct sched_param *);
 extern struct task_struct *idle_task(int cpu);
 extern struct task_struct *curr_task(int cpu);
 extern void set_curr_task(int cpu, struct task_struct *p);
@@ -1697,19 +1699,13 @@ extern struct pid_namespace init_pid_ns;
  *      finds a task by its pid in the specified namespace
  * find_task_by_vpid():
  *      finds a task by its virtual pid
- * find_task_by_pid():
- *      finds a task by its global pid
  *
- * see also find_pid() etc in include/linux/pid.h
+ * see also find_vpid() etc in include/linux/pid.h
  */
 
 extern struct task_struct *find_task_by_pid_type_ns(int type, int pid,
 		struct pid_namespace *ns);
 
-static inline struct task_struct *__deprecated find_task_by_pid(pid_t nr)
-{
-	return find_task_by_pid_type_ns(PIDTYPE_PID, nr, &init_pid_ns);
-}
 extern struct task_struct *find_task_by_vpid(pid_t nr);
 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
 		struct pid_namespace *ns);
@@ -1777,12 +1773,11 @@ extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_
 extern int kill_pgrp(struct pid *pid, int sig, int priv);
 extern int kill_pid(struct pid *pid, int sig, int priv);
 extern int kill_proc_info(int, struct siginfo *, pid_t);
-extern void do_notify_parent(struct task_struct *, int);
+extern int do_notify_parent(struct task_struct *, int);
 extern void force_sig(int, struct task_struct *);
 extern void force_sig_specific(int, struct task_struct *);
 extern int send_sig(int, struct task_struct *, int);
 extern void zap_other_threads(struct task_struct *p);
-extern int kill_proc(pid_t, int, int);
 extern struct sigqueue *sigqueue_alloc(void);
 extern void sigqueue_free(struct sigqueue *);
 extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
@@ -1864,14 +1859,15 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
 extern char *get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
-extern void wait_task_inactive(struct task_struct * p);
+extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
 #else
-#define wait_task_inactive(p)	do { } while (0)
+static inline unsigned long wait_task_inactive(struct task_struct *p,
+					       long match_state)
+{
+	return 1;
+}
 #endif
 
-#define remove_parent(p)	list_del_init(&(p)->sibling)
-#define add_parent(p)		list_add_tail(&(p)->sibling,&(p)->parent->children)
-
 #define next_task(p)	list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)
 
 #define for_each_process(p) \
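The new wait_task_inactive() contract above: the second argument is a task state to match, and a zero return means the task was no longer in that state (the old void form could not report this). A hedged usage sketch, kernel context assumed and the helper name hypothetical, modelled on the ptrace attach path:

```c
/* Sketch only -- freeze_and_check() is not a real kernel function. */
static int freeze_and_check(struct task_struct *child)
{
	/*
	 * Block until @child is off its CPU *and* still in TASK_TRACED.
	 * A zero return means it left that state in the meantime, so
	 * treat it as gone rather than poking a possibly-running task.
	 */
	if (!wait_task_inactive(child, TASK_TRACED))
		return -ESRCH;

	return 0;	/* child is quiescent and still traced */
}
```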
@@ -1968,6 +1964,13 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
 
 #endif
 
+static inline int object_is_on_stack(void *obj)
+{
+	void *stack = task_stack_page(current);
+
+	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
+}
+
 extern void thread_info_cache_init(void);
 
 #ifdef CONFIG_DEBUG_STACK_USAGE
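The new object_is_on_stack() helper is directly relevant to this branch: stack-protector work needs to ask whether an object lives inside the current task's stack. A userspace model of the same bounds check; the THREAD_SIZE value and the stack-base rounding below are illustrative assumptions, not how the kernel obtains the stack page:

```c
#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE 8192	/* illustrative; arch-dependent in the kernel */

static int object_is_on_stack(const void *obj, const void *stack)
{
	return (const char *)obj >= (const char *)stack &&
	       (const char *)obj <  (const char *)stack + THREAD_SIZE;
}

int main(void)
{
	char on_stack;		/* lives in main()'s frame */
	static char in_bss;	/* lives in .bss, not on the stack */
	/* approximate the stack "page" by rounding the local's address down */
	const void *stack =
		(const void *)((uintptr_t)&on_stack & ~(uintptr_t)(THREAD_SIZE - 1));

	printf("local:  %d\n", object_is_on_stack(&on_stack, stack));
	printf("static: %d\n", object_is_on_stack(&in_bss, stack));
	return 0;
}
```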
@@ -2045,9 +2048,6 @@ static inline int signal_pending_state(long state, struct task_struct *p)
 	if (!signal_pending(p))
 		return 0;
 
-	if (state & (__TASK_STOPPED | __TASK_TRACED))
-		return 0;
-
 	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
 }
 
@@ -2132,14 +2132,17 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 
 #endif /* CONFIG_SMP */
 
-#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
 extern void arch_pick_mmap_layout(struct mm_struct *mm);
+
+#ifdef CONFIG_TRACING
+extern void
+__trace_special(void *__tr, void *__data,
+		unsigned long arg1, unsigned long arg2, unsigned long arg3);
 #else
-static inline void arch_pick_mmap_layout(struct mm_struct *mm)
+static inline void
+__trace_special(void *__tr, void *__data,
+		unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
-	mm->mmap_base = TASK_UNMAPPED_BASE;
-	mm->get_unmapped_area = arch_get_unmapped_area;
-	mm->unmap_area = arch_unmap_area;
 }
 #endif
 
@@ -2177,22 +2180,22 @@ extern long sched_group_rt_period(struct task_group *tg);
 #ifdef CONFIG_TASK_XACCT
 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
 {
-	tsk->rchar += amt;
+	tsk->ioac.rchar += amt;
 }
 
 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
 {
-	tsk->wchar += amt;
+	tsk->ioac.wchar += amt;
 }
 
 static inline void inc_syscr(struct task_struct *tsk)
 {
-	tsk->syscr++;
+	tsk->ioac.syscr++;
 }
 
 static inline void inc_syscw(struct task_struct *tsk)
 {
-	tsk->syscw++;
+	tsk->ioac.syscw++;
 }
 #else
 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
@@ -2212,14 +2215,6 @@ static inline void inc_syscw(struct task_struct *tsk)
 }
 #endif
 
-#ifdef CONFIG_SMP
-void migration_init(void);
-#else
-static inline void migration_init(void)
-{
-}
-#endif
-
 #ifndef TASK_SIZE_OF
 #define TASK_SIZE_OF(tsk)	TASK_SIZE
 #endif
@@ -2237,6 +2232,8 @@ static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
 }
 #endif /* CONFIG_MM_OWNER */
 
+#define TASK_STATE_TO_CHAR_STR "RSDTtZX"
+
 #endif /* __KERNEL__ */
 
 #endif