Merge branches 'release', 'acpica', 'bugzilla-10224', 'bugzilla-9772', 'bugzilla-9916', 'ec', 'eeepc', 'idle', 'misc', 'pm-legacy', 'sysfs-links-2.6.26', 'thermal', 'thinkpad' and 'video' into release
This commit is contained in:

@@ -8,7 +8,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
signal.o sys.o kmod.o workqueue.o pid.o \
rcupdate.o extable.o params.o posix-timers.o \
kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
hrtimer.o rwsem.o nsproxy.o srcu.o \
hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
notifier.o ksysfs.o pm_qos_params.o

obj-$(CONFIG_SYSCTL) += sysctl_check.o
@@ -53,6 +53,7 @@ obj-$(CONFIG_AUDIT) += audit.o auditfilter.o
obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
obj-$(CONFIG_AUDIT_TREE) += audit_tree.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_DETECT_SOFTLOCKUP) += softlockup.o
obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
obj-$(CONFIG_SECCOMP) += seccomp.o
@@ -21,7 +21,7 @@
|
||||
*
|
||||
* Written by Rickard E. (Rik) Faith <faith@redhat.com>
|
||||
*
|
||||
* Goals: 1) Integrate fully with SELinux.
|
||||
* Goals: 1) Integrate fully with Security Modules.
|
||||
* 2) Minimal run-time overhead:
|
||||
* a) Minimal when syscall auditing is disabled (audit_enable=0).
|
||||
* b) Small when syscall auditing is enabled and no audit record
|
||||
@@ -55,7 +55,6 @@
|
||||
#include <net/netlink.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/netlink.h>
|
||||
#include <linux/selinux.h>
|
||||
#include <linux/inotify.h>
|
||||
#include <linux/freezer.h>
|
||||
#include <linux/tty.h>
|
||||
@@ -265,13 +264,13 @@ static int audit_log_config_change(char *function_name, int new, int old,
|
||||
char *ctx = NULL;
|
||||
u32 len;
|
||||
|
||||
rc = selinux_sid_to_string(sid, &ctx, &len);
|
||||
rc = security_secid_to_secctx(sid, &ctx, &len);
|
||||
if (rc) {
|
||||
audit_log_format(ab, " sid=%u", sid);
|
||||
allow_changes = 0; /* Something weird, deny request */
|
||||
} else {
|
||||
audit_log_format(ab, " subj=%s", ctx);
|
||||
kfree(ctx);
|
||||
security_release_secctx(ctx, len);
|
||||
}
|
||||
}
|
||||
audit_log_format(ab, " res=%d", allow_changes);
|
||||
@@ -550,12 +549,13 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type,
|
||||
audit_log_format(*ab, "user pid=%d uid=%u auid=%u",
|
||||
pid, uid, auid);
|
||||
if (sid) {
|
||||
rc = selinux_sid_to_string(sid, &ctx, &len);
|
||||
rc = security_secid_to_secctx(sid, &ctx, &len);
|
||||
if (rc)
|
||||
audit_log_format(*ab, " ssid=%u", sid);
|
||||
else
|
||||
else {
|
||||
audit_log_format(*ab, " subj=%s", ctx);
|
||||
kfree(ctx);
|
||||
security_release_secctx(ctx, len);
|
||||
}
|
||||
}
|
||||
|
||||
return rc;
|
||||
@@ -758,18 +758,18 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
|
||||
break;
|
||||
}
|
||||
case AUDIT_SIGNAL_INFO:
|
||||
err = selinux_sid_to_string(audit_sig_sid, &ctx, &len);
|
||||
err = security_secid_to_secctx(audit_sig_sid, &ctx, &len);
|
||||
if (err)
|
||||
return err;
|
||||
sig_data = kmalloc(sizeof(*sig_data) + len, GFP_KERNEL);
|
||||
if (!sig_data) {
|
||||
kfree(ctx);
|
||||
security_release_secctx(ctx, len);
|
||||
return -ENOMEM;
|
||||
}
|
||||
sig_data->uid = audit_sig_uid;
|
||||
sig_data->pid = audit_sig_pid;
|
||||
memcpy(sig_data->ctx, ctx, len);
|
||||
kfree(ctx);
|
||||
security_release_secctx(ctx, len);
|
||||
audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_SIGNAL_INFO,
|
||||
0, 0, sig_data, sizeof(*sig_data) + len);
|
||||
kfree(sig_data);
|
||||
@@ -881,10 +881,6 @@ static int __init audit_init(void)
|
||||
audit_enabled = audit_default;
|
||||
audit_ever_enabled |= !!audit_default;
|
||||
|
||||
/* Register the callback with selinux. This callback will be invoked
|
||||
* when a new policy is loaded. */
|
||||
selinux_audit_set_callback(&selinux_audit_rule_update);
|
||||
|
||||
audit_log(NULL, GFP_KERNEL, AUDIT_KERNEL, "initialized");
|
||||
|
||||
#ifdef CONFIG_AUDITSYSCALL
|
||||
|
@@ -65,34 +65,9 @@ struct audit_watch {
|
||||
struct list_head rules; /* associated rules */
|
||||
};
|
||||
|
||||
struct audit_field {
|
||||
u32 type;
|
||||
u32 val;
|
||||
u32 op;
|
||||
char *se_str;
|
||||
struct selinux_audit_rule *se_rule;
|
||||
};
|
||||
|
||||
struct audit_tree;
|
||||
struct audit_chunk;
|
||||
|
||||
struct audit_krule {
|
||||
int vers_ops;
|
||||
u32 flags;
|
||||
u32 listnr;
|
||||
u32 action;
|
||||
u32 mask[AUDIT_BITMASK_SIZE];
|
||||
u32 buflen; /* for data alloc on list rules */
|
||||
u32 field_count;
|
||||
char *filterkey; /* ties events to rules */
|
||||
struct audit_field *fields;
|
||||
struct audit_field *arch_f; /* quick access to arch field */
|
||||
struct audit_field *inode_f; /* quick access to an inode field */
|
||||
struct audit_watch *watch; /* associated watch */
|
||||
struct audit_tree *tree; /* associated watched tree */
|
||||
struct list_head rlist; /* entry in audit_{watch,tree}.rules list */
|
||||
};
|
||||
|
||||
struct audit_entry {
|
||||
struct list_head list;
|
||||
struct rcu_head rcu;
|
||||
|
@@ -28,7 +28,7 @@
|
||||
#include <linux/netlink.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/inotify.h>
|
||||
#include <linux/selinux.h>
|
||||
#include <linux/security.h>
|
||||
#include "audit.h"
|
||||
|
||||
/*
|
||||
@@ -38,7 +38,7 @@
|
||||
* Synchronizes writes and blocking reads of audit's filterlist
|
||||
* data. Rcu is used to traverse the filterlist and access
|
||||
* contents of structs audit_entry, audit_watch and opaque
|
||||
* selinux rules during filtering. If modified, these structures
|
||||
* LSM rules during filtering. If modified, these structures
|
||||
* must be copied and replace their counterparts in the filterlist.
|
||||
* An audit_parent struct is not accessed during filtering, so may
|
||||
* be written directly provided audit_filter_mutex is held.
|
||||
@@ -139,8 +139,8 @@ static inline void audit_free_rule(struct audit_entry *e)
|
||||
if (e->rule.fields)
|
||||
for (i = 0; i < e->rule.field_count; i++) {
|
||||
struct audit_field *f = &e->rule.fields[i];
|
||||
kfree(f->se_str);
|
||||
selinux_audit_rule_free(f->se_rule);
|
||||
kfree(f->lsm_str);
|
||||
security_audit_rule_free(f->lsm_rule);
|
||||
}
|
||||
kfree(e->rule.fields);
|
||||
kfree(e->rule.filterkey);
|
||||
@@ -554,8 +554,8 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
|
||||
f->op = data->fieldflags[i] & AUDIT_OPERATORS;
|
||||
f->type = data->fields[i];
|
||||
f->val = data->values[i];
|
||||
f->se_str = NULL;
|
||||
f->se_rule = NULL;
|
||||
f->lsm_str = NULL;
|
||||
f->lsm_rule = NULL;
|
||||
switch(f->type) {
|
||||
case AUDIT_PID:
|
||||
case AUDIT_UID:
|
||||
@@ -597,12 +597,12 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
|
||||
goto exit_free;
|
||||
entry->rule.buflen += f->val;
|
||||
|
||||
err = selinux_audit_rule_init(f->type, f->op, str,
|
||||
&f->se_rule);
|
||||
err = security_audit_rule_init(f->type, f->op, str,
|
||||
(void **)&f->lsm_rule);
|
||||
/* Keep currently invalid fields around in case they
|
||||
* become valid after a policy reload. */
|
||||
if (err == -EINVAL) {
|
||||
printk(KERN_WARNING "audit rule for selinux "
|
||||
printk(KERN_WARNING "audit rule for LSM "
|
||||
"\'%s\' is invalid\n", str);
|
||||
err = 0;
|
||||
}
|
||||
@@ -610,7 +610,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
|
||||
kfree(str);
|
||||
goto exit_free;
|
||||
} else
|
||||
f->se_str = str;
|
||||
f->lsm_str = str;
|
||||
break;
|
||||
case AUDIT_WATCH:
|
||||
str = audit_unpack_string(&bufp, &remain, f->val);
|
||||
@@ -754,7 +754,7 @@ static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule)
|
||||
case AUDIT_OBJ_LEV_LOW:
|
||||
case AUDIT_OBJ_LEV_HIGH:
|
||||
data->buflen += data->values[i] =
|
||||
audit_pack_string(&bufp, f->se_str);
|
||||
audit_pack_string(&bufp, f->lsm_str);
|
||||
break;
|
||||
case AUDIT_WATCH:
|
||||
data->buflen += data->values[i] =
|
||||
@@ -806,7 +806,7 @@ static int audit_compare_rule(struct audit_krule *a, struct audit_krule *b)
|
||||
case AUDIT_OBJ_TYPE:
|
||||
case AUDIT_OBJ_LEV_LOW:
|
||||
case AUDIT_OBJ_LEV_HIGH:
|
||||
if (strcmp(a->fields[i].se_str, b->fields[i].se_str))
|
||||
if (strcmp(a->fields[i].lsm_str, b->fields[i].lsm_str))
|
||||
return 1;
|
||||
break;
|
||||
case AUDIT_WATCH:
|
||||
@@ -862,28 +862,28 @@ out:
|
||||
return new;
|
||||
}
|
||||
|
||||
/* Duplicate selinux field information. The se_rule is opaque, so must be
|
||||
/* Duplicate LSM field information. The lsm_rule is opaque, so must be
|
||||
* re-initialized. */
|
||||
static inline int audit_dupe_selinux_field(struct audit_field *df,
|
||||
static inline int audit_dupe_lsm_field(struct audit_field *df,
|
||||
struct audit_field *sf)
|
||||
{
|
||||
int ret = 0;
|
||||
char *se_str;
|
||||
char *lsm_str;
|
||||
|
||||
/* our own copy of se_str */
|
||||
se_str = kstrdup(sf->se_str, GFP_KERNEL);
|
||||
if (unlikely(!se_str))
|
||||
/* our own copy of lsm_str */
|
||||
lsm_str = kstrdup(sf->lsm_str, GFP_KERNEL);
|
||||
if (unlikely(!lsm_str))
|
||||
return -ENOMEM;
|
||||
df->se_str = se_str;
|
||||
df->lsm_str = lsm_str;
|
||||
|
||||
/* our own (refreshed) copy of se_rule */
|
||||
ret = selinux_audit_rule_init(df->type, df->op, df->se_str,
|
||||
&df->se_rule);
|
||||
/* our own (refreshed) copy of lsm_rule */
|
||||
ret = security_audit_rule_init(df->type, df->op, df->lsm_str,
|
||||
(void **)&df->lsm_rule);
|
||||
/* Keep currently invalid fields around in case they
|
||||
* become valid after a policy reload. */
|
||||
if (ret == -EINVAL) {
|
||||
printk(KERN_WARNING "audit rule for selinux \'%s\' is "
|
||||
"invalid\n", df->se_str);
|
||||
printk(KERN_WARNING "audit rule for LSM \'%s\' is "
|
||||
"invalid\n", df->lsm_str);
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
@@ -891,7 +891,7 @@ static inline int audit_dupe_selinux_field(struct audit_field *df,
|
||||
}
|
||||
|
||||
/* Duplicate an audit rule. This will be a deep copy with the exception
|
||||
* of the watch - that pointer is carried over. The selinux specific fields
|
||||
* of the watch - that pointer is carried over. The LSM specific fields
|
||||
* will be updated in the copy. The point is to be able to replace the old
|
||||
* rule with the new rule in the filterlist, then free the old rule.
|
||||
* The rlist element is undefined; list manipulations are handled apart from
|
||||
@@ -930,7 +930,7 @@ static struct audit_entry *audit_dupe_rule(struct audit_krule *old,
|
||||
new->tree = old->tree;
|
||||
memcpy(new->fields, old->fields, sizeof(struct audit_field) * fcount);
|
||||
|
||||
/* deep copy this information, updating the se_rule fields, because
|
||||
/* deep copy this information, updating the lsm_rule fields, because
|
||||
* the originals will all be freed when the old rule is freed. */
|
||||
for (i = 0; i < fcount; i++) {
|
||||
switch (new->fields[i].type) {
|
||||
@@ -944,7 +944,7 @@ static struct audit_entry *audit_dupe_rule(struct audit_krule *old,
|
||||
case AUDIT_OBJ_TYPE:
|
||||
case AUDIT_OBJ_LEV_LOW:
|
||||
case AUDIT_OBJ_LEV_HIGH:
|
||||
err = audit_dupe_selinux_field(&new->fields[i],
|
||||
err = audit_dupe_lsm_field(&new->fields[i],
|
||||
&old->fields[i]);
|
||||
break;
|
||||
case AUDIT_FILTERKEY:
|
||||
@@ -1515,11 +1515,12 @@ static void audit_log_rule_change(uid_t loginuid, u32 sid, char *action,
|
||||
if (sid) {
|
||||
char *ctx = NULL;
|
||||
u32 len;
|
||||
if (selinux_sid_to_string(sid, &ctx, &len))
|
||||
if (security_secid_to_secctx(sid, &ctx, &len))
|
||||
audit_log_format(ab, " ssid=%u", sid);
|
||||
else
|
||||
else {
|
||||
audit_log_format(ab, " subj=%s", ctx);
|
||||
kfree(ctx);
|
||||
security_release_secctx(ctx, len);
|
||||
}
|
||||
}
|
||||
audit_log_format(ab, " op=%s rule key=", action);
|
||||
if (rule->filterkey)
|
||||
@@ -1761,38 +1762,12 @@ unlock_and_return:
|
||||
return result;
|
||||
}
|
||||
|
||||
/* Check to see if the rule contains any selinux fields. Returns 1 if there
|
||||
are selinux fields specified in the rule, 0 otherwise. */
|
||||
static inline int audit_rule_has_selinux(struct audit_krule *rule)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < rule->field_count; i++) {
|
||||
struct audit_field *f = &rule->fields[i];
|
||||
switch (f->type) {
|
||||
case AUDIT_SUBJ_USER:
|
||||
case AUDIT_SUBJ_ROLE:
|
||||
case AUDIT_SUBJ_TYPE:
|
||||
case AUDIT_SUBJ_SEN:
|
||||
case AUDIT_SUBJ_CLR:
|
||||
case AUDIT_OBJ_USER:
|
||||
case AUDIT_OBJ_ROLE:
|
||||
case AUDIT_OBJ_TYPE:
|
||||
case AUDIT_OBJ_LEV_LOW:
|
||||
case AUDIT_OBJ_LEV_HIGH:
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* This function will re-initialize the se_rule field of all applicable rules.
|
||||
* It will traverse the filter lists serarching for rules that contain selinux
|
||||
/* This function will re-initialize the lsm_rule field of all applicable rules.
|
||||
* It will traverse the filter lists serarching for rules that contain LSM
|
||||
* specific filter fields. When such a rule is found, it is copied, the
|
||||
* selinux field is re-initialized, and the old rule is replaced with the
|
||||
* LSM field is re-initialized, and the old rule is replaced with the
|
||||
* updated rule. */
|
||||
int selinux_audit_rule_update(void)
|
||||
int audit_update_lsm_rules(void)
|
||||
{
|
||||
struct audit_entry *entry, *n, *nentry;
|
||||
struct audit_watch *watch;
|
||||
@@ -1804,7 +1779,7 @@ int selinux_audit_rule_update(void)
|
||||
|
||||
for (i = 0; i < AUDIT_NR_FILTERS; i++) {
|
||||
list_for_each_entry_safe(entry, n, &audit_filter_list[i], list) {
|
||||
if (!audit_rule_has_selinux(&entry->rule))
|
||||
if (!security_audit_rule_known(&entry->rule))
|
||||
continue;
|
||||
|
||||
watch = entry->rule.watch;
|
||||
@@ -1815,7 +1790,7 @@ int selinux_audit_rule_update(void)
|
||||
* return value */
|
||||
if (!err)
|
||||
err = PTR_ERR(nentry);
|
||||
audit_panic("error updating selinux filters");
|
||||
audit_panic("error updating LSM filters");
|
||||
if (watch)
|
||||
list_del(&entry->rule.rlist);
|
||||
list_del_rcu(&entry->list);
|
||||
|
@@ -61,7 +61,6 @@
|
||||
#include <linux/security.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/tty.h>
|
||||
#include <linux/selinux.h>
|
||||
#include <linux/binfmts.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/syscalls.h>
|
||||
@@ -528,14 +527,14 @@ static int audit_filter_rules(struct task_struct *tsk,
|
||||
match for now to avoid losing information that
|
||||
may be wanted. An error message will also be
|
||||
logged upon error */
|
||||
if (f->se_rule) {
|
||||
if (f->lsm_rule) {
|
||||
if (need_sid) {
|
||||
selinux_get_task_sid(tsk, &sid);
|
||||
security_task_getsecid(tsk, &sid);
|
||||
need_sid = 0;
|
||||
}
|
||||
result = selinux_audit_rule_match(sid, f->type,
|
||||
result = security_audit_rule_match(sid, f->type,
|
||||
f->op,
|
||||
f->se_rule,
|
||||
f->lsm_rule,
|
||||
ctx);
|
||||
}
|
||||
break;
|
||||
@@ -546,18 +545,18 @@ static int audit_filter_rules(struct task_struct *tsk,
|
||||
case AUDIT_OBJ_LEV_HIGH:
|
||||
/* The above note for AUDIT_SUBJ_USER...AUDIT_SUBJ_CLR
|
||||
also applies here */
|
||||
if (f->se_rule) {
|
||||
if (f->lsm_rule) {
|
||||
/* Find files that match */
|
||||
if (name) {
|
||||
result = selinux_audit_rule_match(
|
||||
result = security_audit_rule_match(
|
||||
name->osid, f->type, f->op,
|
||||
f->se_rule, ctx);
|
||||
f->lsm_rule, ctx);
|
||||
} else if (ctx) {
|
||||
for (j = 0; j < ctx->name_count; j++) {
|
||||
if (selinux_audit_rule_match(
|
||||
if (security_audit_rule_match(
|
||||
ctx->names[j].osid,
|
||||
f->type, f->op,
|
||||
f->se_rule, ctx)) {
|
||||
f->lsm_rule, ctx)) {
|
||||
++result;
|
||||
break;
|
||||
}
|
||||
@@ -570,7 +569,7 @@ static int audit_filter_rules(struct task_struct *tsk,
|
||||
aux = aux->next) {
|
||||
if (aux->type == AUDIT_IPC) {
|
||||
struct audit_aux_data_ipcctl *axi = (void *)aux;
|
||||
if (selinux_audit_rule_match(axi->osid, f->type, f->op, f->se_rule, ctx)) {
|
||||
if (security_audit_rule_match(axi->osid, f->type, f->op, f->lsm_rule, ctx)) {
|
||||
++result;
|
||||
break;
|
||||
}
|
||||
@@ -885,11 +884,11 @@ void audit_log_task_context(struct audit_buffer *ab)
|
||||
int error;
|
||||
u32 sid;
|
||||
|
||||
selinux_get_task_sid(current, &sid);
|
||||
security_task_getsecid(current, &sid);
|
||||
if (!sid)
|
||||
return;
|
||||
|
||||
error = selinux_sid_to_string(sid, &ctx, &len);
|
||||
error = security_secid_to_secctx(sid, &ctx, &len);
|
||||
if (error) {
|
||||
if (error != -EINVAL)
|
||||
goto error_path;
|
||||
@@ -897,7 +896,7 @@ void audit_log_task_context(struct audit_buffer *ab)
|
||||
}
|
||||
|
||||
audit_log_format(ab, " subj=%s", ctx);
|
||||
kfree(ctx);
|
||||
security_release_secctx(ctx, len);
|
||||
return;
|
||||
|
||||
error_path:
|
||||
@@ -941,7 +940,7 @@ static int audit_log_pid_context(struct audit_context *context, pid_t pid,
|
||||
u32 sid, char *comm)
|
||||
{
|
||||
struct audit_buffer *ab;
|
||||
char *s = NULL;
|
||||
char *ctx = NULL;
|
||||
u32 len;
|
||||
int rc = 0;
|
||||
|
||||
@@ -951,15 +950,16 @@ static int audit_log_pid_context(struct audit_context *context, pid_t pid,
|
||||
|
||||
audit_log_format(ab, "opid=%d oauid=%d ouid=%d oses=%d", pid, auid,
|
||||
uid, sessionid);
|
||||
if (selinux_sid_to_string(sid, &s, &len)) {
|
||||
if (security_secid_to_secctx(sid, &ctx, &len)) {
|
||||
audit_log_format(ab, " obj=(none)");
|
||||
rc = 1;
|
||||
} else
|
||||
audit_log_format(ab, " obj=%s", s);
|
||||
} else {
|
||||
audit_log_format(ab, " obj=%s", ctx);
|
||||
security_release_secctx(ctx, len);
|
||||
}
|
||||
audit_log_format(ab, " ocomm=");
|
||||
audit_log_untrustedstring(ab, comm);
|
||||
audit_log_end(ab);
|
||||
kfree(s);
|
||||
|
||||
return rc;
|
||||
}
|
||||
@@ -1271,14 +1271,15 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
|
||||
if (axi->osid != 0) {
|
||||
char *ctx = NULL;
|
||||
u32 len;
|
||||
if (selinux_sid_to_string(
|
||||
if (security_secid_to_secctx(
|
||||
axi->osid, &ctx, &len)) {
|
||||
audit_log_format(ab, " osid=%u",
|
||||
axi->osid);
|
||||
call_panic = 1;
|
||||
} else
|
||||
} else {
|
||||
audit_log_format(ab, " obj=%s", ctx);
|
||||
kfree(ctx);
|
||||
security_release_secctx(ctx, len);
|
||||
}
|
||||
}
|
||||
break; }
|
||||
|
||||
@@ -1392,13 +1393,14 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
|
||||
if (n->osid != 0) {
|
||||
char *ctx = NULL;
|
||||
u32 len;
|
||||
if (selinux_sid_to_string(
|
||||
if (security_secid_to_secctx(
|
||||
n->osid, &ctx, &len)) {
|
||||
audit_log_format(ab, " osid=%u", n->osid);
|
||||
call_panic = 2;
|
||||
} else
|
||||
} else {
|
||||
audit_log_format(ab, " obj=%s", ctx);
|
||||
kfree(ctx);
|
||||
security_release_secctx(ctx, len);
|
||||
}
|
||||
}
|
||||
|
||||
audit_log_end(ab);
|
||||
@@ -1775,7 +1777,7 @@ static void audit_copy_inode(struct audit_names *name, const struct inode *inode
|
||||
name->uid = inode->i_uid;
|
||||
name->gid = inode->i_gid;
|
||||
name->rdev = inode->i_rdev;
|
||||
selinux_get_inode_sid(inode, &name->osid);
|
||||
security_inode_getsecid(inode, &name->osid);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -2190,8 +2192,7 @@ int __audit_ipc_obj(struct kern_ipc_perm *ipcp)
|
||||
ax->uid = ipcp->uid;
|
||||
ax->gid = ipcp->gid;
|
||||
ax->mode = ipcp->mode;
|
||||
selinux_get_ipc_sid(ipcp, &ax->osid);
|
||||
|
||||
security_ipc_getsecid(ipcp, &ax->osid);
|
||||
ax->d.type = AUDIT_IPC;
|
||||
ax->d.next = context->aux;
|
||||
context->aux = (void *)ax;
|
||||
@@ -2343,7 +2344,7 @@ void __audit_ptrace(struct task_struct *t)
|
||||
context->target_auid = audit_get_loginuid(t);
|
||||
context->target_uid = t->uid;
|
||||
context->target_sessionid = audit_get_sessionid(t);
|
||||
selinux_get_task_sid(t, &context->target_sid);
|
||||
security_task_getsecid(t, &context->target_sid);
|
||||
memcpy(context->target_comm, t->comm, TASK_COMM_LEN);
|
||||
}
|
||||
|
||||
@@ -2371,7 +2372,7 @@ int __audit_signal_info(int sig, struct task_struct *t)
|
||||
audit_sig_uid = tsk->loginuid;
|
||||
else
|
||||
audit_sig_uid = tsk->uid;
|
||||
selinux_get_task_sid(tsk, &audit_sig_sid);
|
||||
security_task_getsecid(tsk, &audit_sig_sid);
|
||||
}
|
||||
if (!audit_signals || audit_dummy_context())
|
||||
return 0;
|
||||
@@ -2384,7 +2385,7 @@ int __audit_signal_info(int sig, struct task_struct *t)
|
||||
ctx->target_auid = audit_get_loginuid(t);
|
||||
ctx->target_uid = t->uid;
|
||||
ctx->target_sessionid = audit_get_sessionid(t);
|
||||
selinux_get_task_sid(t, &ctx->target_sid);
|
||||
security_task_getsecid(t, &ctx->target_sid);
|
||||
memcpy(ctx->target_comm, t->comm, TASK_COMM_LEN);
|
||||
return 0;
|
||||
}
|
||||
@@ -2405,7 +2406,7 @@ int __audit_signal_info(int sig, struct task_struct *t)
|
||||
axp->target_auid[axp->pid_count] = audit_get_loginuid(t);
|
||||
axp->target_uid[axp->pid_count] = t->uid;
|
||||
axp->target_sessionid[axp->pid_count] = audit_get_sessionid(t);
|
||||
selinux_get_task_sid(t, &axp->target_sid[axp->pid_count]);
|
||||
security_task_getsecid(t, &axp->target_sid[axp->pid_count]);
|
||||
memcpy(axp->target_comm[axp->pid_count], t->comm, TASK_COMM_LEN);
|
||||
axp->pid_count++;
|
||||
|
||||
@@ -2435,16 +2436,17 @@ void audit_core_dumps(long signr)
|
||||
ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
|
||||
audit_log_format(ab, "auid=%u uid=%u gid=%u ses=%u",
|
||||
auid, current->uid, current->gid, sessionid);
|
||||
selinux_get_task_sid(current, &sid);
|
||||
security_task_getsecid(current, &sid);
|
||||
if (sid) {
|
||||
char *ctx = NULL;
|
||||
u32 len;
|
||||
|
||||
if (selinux_sid_to_string(sid, &ctx, &len))
|
||||
if (security_secid_to_secctx(sid, &ctx, &len))
|
||||
audit_log_format(ab, " ssid=%u", sid);
|
||||
else
|
||||
else {
|
||||
audit_log_format(ab, " subj=%s", ctx);
|
||||
kfree(ctx);
|
||||
security_release_secctx(ctx, len);
|
||||
}
|
||||
}
|
||||
audit_log_format(ab, " pid=%d comm=", current->pid);
|
||||
audit_log_untrustedstring(ab, current->comm);
|
||||
|
23  kernel/bounds.c  (normal file)
@@ -0,0 +1,23 @@
/*
 * Generate definitions needed by the preprocessor.
 * This code generates raw asm output which is post-processed
 * to extract and format the required data.
 */

#define __GENERATING_BOUNDS_H
/* Include headers that define the enum constants of interest */
#include <linux/page-flags.h>
#include <linux/mmzone.h>

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

#define BLANK() asm volatile("\n->" : : )

void foo(void)
{
	/* The enum constants to put into include/linux/bounds.h */
	DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS);
	DEFINE(MAX_NR_ZONES, __MAX_NR_ZONES);
	/* End of constants */
}
@@ -1722,7 +1722,12 @@ void cgroup_enable_task_cg_lists(void)
use_task_css_set_links = 1;
do_each_thread(g, p) {
task_lock(p);
if (list_empty(&p->cg_list))
/*
 * We should check if the process is exiting, otherwise
 * it will race with cgroup_exit() in that the list
 * entry won't be deleted though the process has exited.
 */
if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
list_add(&p->cg_list, &p->cgroups->tasks);
task_unlock(p);
} while_each_thread(g, p);
@@ -47,15 +47,14 @@ static long compat_nanosleep_restart(struct restart_block *restart)
|
||||
mm_segment_t oldfs;
|
||||
long ret;
|
||||
|
||||
rmtp = (struct compat_timespec __user *)(restart->arg1);
|
||||
restart->arg1 = (unsigned long)&rmt;
|
||||
restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
|
||||
oldfs = get_fs();
|
||||
set_fs(KERNEL_DS);
|
||||
ret = hrtimer_nanosleep_restart(restart);
|
||||
set_fs(oldfs);
|
||||
|
||||
if (ret) {
|
||||
restart->arg1 = (unsigned long)rmtp;
|
||||
rmtp = restart->nanosleep.compat_rmtp;
|
||||
|
||||
if (rmtp && put_compat_timespec(&rmt, rmtp))
|
||||
return -EFAULT;
|
||||
@@ -89,7 +88,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
|
||||
= ¤t_thread_info()->restart_block;
|
||||
|
||||
restart->fn = compat_nanosleep_restart;
|
||||
restart->arg1 = (unsigned long)rmtp;
|
||||
restart->nanosleep.compat_rmtp = rmtp;
|
||||
|
||||
if (rmtp && put_compat_timespec(&rmt, rmtp))
|
||||
return -EFAULT;
|
||||
@@ -446,7 +445,7 @@ asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
|
||||
if (retval)
|
||||
return retval;
|
||||
|
||||
return sched_setaffinity(pid, new_mask);
|
||||
return sched_setaffinity(pid, &new_mask);
|
||||
}
|
||||
|
||||
asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
|
||||
@@ -607,9 +606,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
|
||||
long err;
|
||||
mm_segment_t oldfs;
|
||||
struct timespec tu;
|
||||
struct compat_timespec *rmtp = (struct compat_timespec *)(restart->arg1);
|
||||
struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
|
||||
|
||||
restart->arg1 = (unsigned long) &tu;
|
||||
restart->nanosleep.rmtp = (struct timespec __user *) &tu;
|
||||
oldfs = get_fs();
|
||||
set_fs(KERNEL_DS);
|
||||
err = clock_nanosleep_restart(restart);
|
||||
@@ -621,7 +620,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
|
||||
|
||||
if (err == -ERESTART_RESTARTBLOCK) {
|
||||
restart->fn = compat_clock_nanosleep_restart;
|
||||
restart->arg1 = (unsigned long) rmtp;
|
||||
restart->nanosleep.compat_rmtp = rmtp;
|
||||
}
|
||||
return err;
|
||||
}
|
||||
@@ -652,7 +651,7 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
|
||||
if (err == -ERESTART_RESTARTBLOCK) {
|
||||
restart = ¤t_thread_info()->restart_block;
|
||||
restart->fn = compat_clock_nanosleep_restart;
|
||||
restart->arg1 = (unsigned long) rmtp;
|
||||
restart->nanosleep.compat_rmtp = rmtp;
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
@@ -232,9 +232,9 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
|
||||
|
||||
/* Ensure that we are not runnable on dying cpu */
|
||||
old_allowed = current->cpus_allowed;
|
||||
tmp = CPU_MASK_ALL;
|
||||
cpus_setall(tmp);
|
||||
cpu_clear(cpu, tmp);
|
||||
set_cpus_allowed(current, tmp);
|
||||
set_cpus_allowed_ptr(current, &tmp);
|
||||
|
||||
p = __stop_machine_run(take_cpu_down, &tcd_param, cpu);
|
||||
|
||||
@@ -268,7 +268,7 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
|
||||
out_thread:
|
||||
err = kthread_stop(p);
|
||||
out_allowed:
|
||||
set_cpus_allowed(current, old_allowed);
|
||||
set_cpus_allowed_ptr(current, &old_allowed);
|
||||
out_release:
|
||||
cpu_hotplug_done();
|
||||
return err;
|
||||
|
125  kernel/cpuset.c
@@ -98,6 +98,9 @@ struct cpuset {
|
||||
/* partition number for rebuild_sched_domains() */
|
||||
int pn;
|
||||
|
||||
/* for custom sched domain */
|
||||
int relax_domain_level;
|
||||
|
||||
/* used for walking a cpuset heirarchy */
|
||||
struct list_head stack_list;
|
||||
};
|
||||
@@ -478,6 +481,16 @@ static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
|
||||
return cpus_intersects(a->cpus_allowed, b->cpus_allowed);
|
||||
}
|
||||
|
||||
static void
|
||||
update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
|
||||
{
|
||||
if (!dattr)
|
||||
return;
|
||||
if (dattr->relax_domain_level < c->relax_domain_level)
|
||||
dattr->relax_domain_level = c->relax_domain_level;
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* rebuild_sched_domains()
|
||||
*
|
||||
@@ -553,12 +566,14 @@ static void rebuild_sched_domains(void)
|
||||
int csn; /* how many cpuset ptrs in csa so far */
|
||||
int i, j, k; /* indices for partition finding loops */
|
||||
cpumask_t *doms; /* resulting partition; i.e. sched domains */
|
||||
struct sched_domain_attr *dattr; /* attributes for custom domains */
|
||||
int ndoms; /* number of sched domains in result */
|
||||
int nslot; /* next empty doms[] cpumask_t slot */
|
||||
|
||||
q = NULL;
|
||||
csa = NULL;
|
||||
doms = NULL;
|
||||
dattr = NULL;
|
||||
|
||||
/* Special case for the 99% of systems with one, full, sched domain */
|
||||
if (is_sched_load_balance(&top_cpuset)) {
|
||||
@@ -566,6 +581,11 @@ static void rebuild_sched_domains(void)
|
||||
doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
|
||||
if (!doms)
|
||||
goto rebuild;
|
||||
dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
|
||||
if (dattr) {
|
||||
*dattr = SD_ATTR_INIT;
|
||||
update_domain_attr(dattr, &top_cpuset);
|
||||
}
|
||||
*doms = top_cpuset.cpus_allowed;
|
||||
goto rebuild;
|
||||
}
|
||||
@@ -622,6 +642,7 @@ restart:
|
||||
doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL);
|
||||
if (!doms)
|
||||
goto rebuild;
|
||||
dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);
|
||||
|
||||
for (nslot = 0, i = 0; i < csn; i++) {
|
||||
struct cpuset *a = csa[i];
|
||||
@@ -644,12 +665,15 @@ restart:
|
||||
}
|
||||
|
||||
cpus_clear(*dp);
|
||||
if (dattr)
|
||||
*(dattr + nslot) = SD_ATTR_INIT;
|
||||
for (j = i; j < csn; j++) {
|
||||
struct cpuset *b = csa[j];
|
||||
|
||||
if (apn == b->pn) {
|
||||
cpus_or(*dp, *dp, b->cpus_allowed);
|
||||
b->pn = -1;
|
||||
update_domain_attr(dattr, b);
|
||||
}
|
||||
}
|
||||
nslot++;
|
||||
@@ -660,7 +684,7 @@ restart:
|
||||
rebuild:
|
||||
/* Have scheduler rebuild sched domains */
|
||||
get_online_cpus();
|
||||
partition_sched_domains(ndoms, doms);
|
||||
partition_sched_domains(ndoms, doms, dattr);
|
||||
put_online_cpus();
|
||||
|
||||
done:
|
||||
@@ -668,6 +692,7 @@ done:
|
||||
kfifo_free(q);
|
||||
kfree(csa);
|
||||
/* Don't kfree(doms) -- partition_sched_domains() does that. */
|
||||
/* Don't kfree(dattr) -- partition_sched_domains() does that. */
|
||||
}
|
||||
|
||||
static inline int started_after_time(struct task_struct *t1,
|
||||
@@ -729,7 +754,7 @@ int cpuset_test_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan)
|
||||
*/
|
||||
void cpuset_change_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan)
|
||||
{
|
||||
set_cpus_allowed(tsk, (cgroup_cs(scan->cg))->cpus_allowed);
|
||||
set_cpus_allowed_ptr(tsk, &((cgroup_cs(scan->cg))->cpus_allowed));
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -916,7 +941,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
|
||||
cs->mems_generation = cpuset_mems_generation++;
|
||||
mutex_unlock(&callback_mutex);
|
||||
|
||||
cpuset_being_rebound = cs; /* causes mpol_copy() rebind */
|
||||
cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
|
||||
|
||||
fudge = 10; /* spare mmarray[] slots */
|
||||
fudge += cpus_weight(cs->cpus_allowed); /* imagine one fork-bomb/cpu */
|
||||
@@ -967,7 +992,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
|
||||
* rebind the vma mempolicies of each mm in mmarray[] to their
|
||||
* new cpuset, and release that mm. The mpol_rebind_mm()
|
||||
* call takes mmap_sem, which we couldn't take while holding
|
||||
* tasklist_lock. Forks can happen again now - the mpol_copy()
|
||||
* tasklist_lock. Forks can happen again now - the mpol_dup()
|
||||
* cpuset_being_rebound check will catch such forks, and rebind
|
||||
* their vma mempolicies too. Because we still hold the global
|
||||
* cgroup_mutex, we know that no other rebind effort will
|
||||
@@ -1011,6 +1036,21 @@ static int update_memory_pressure_enabled(struct cpuset *cs, char *buf)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int update_relax_domain_level(struct cpuset *cs, char *buf)
|
||||
{
|
||||
int val = simple_strtol(buf, NULL, 10);
|
||||
|
||||
if (val < 0)
|
||||
val = -1;
|
||||
|
||||
if (val != cs->relax_domain_level) {
|
||||
cs->relax_domain_level = val;
|
||||
rebuild_sched_domains();
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* update_flag - read a 0 or a 1 in a file and update associated flag
|
||||
* bit: the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE,
|
||||
@@ -1178,7 +1218,7 @@ static void cpuset_attach(struct cgroup_subsys *ss,
|
||||
|
||||
mutex_lock(&callback_mutex);
|
||||
guarantee_online_cpus(cs, &cpus);
|
||||
set_cpus_allowed(tsk, cpus);
|
||||
set_cpus_allowed_ptr(tsk, &cpus);
|
||||
mutex_unlock(&callback_mutex);
|
||||
|
||||
from = oldcs->mems_allowed;
|
||||
@@ -1202,6 +1242,7 @@ typedef enum {
|
||||
FILE_CPU_EXCLUSIVE,
|
||||
FILE_MEM_EXCLUSIVE,
|
||||
FILE_SCHED_LOAD_BALANCE,
|
||||
FILE_SCHED_RELAX_DOMAIN_LEVEL,
|
||||
FILE_MEMORY_PRESSURE_ENABLED,
|
||||
FILE_MEMORY_PRESSURE,
|
||||
FILE_SPREAD_PAGE,
|
||||
@@ -1224,7 +1265,8 @@ static ssize_t cpuset_common_file_write(struct cgroup *cont,
|
||||
return -E2BIG;
|
||||
|
||||
/* +1 for nul-terminator */
|
||||
if ((buffer = kmalloc(nbytes + 1, GFP_KERNEL)) == 0)
|
||||
buffer = kmalloc(nbytes + 1, GFP_KERNEL);
|
||||
if (!buffer)
|
||||
return -ENOMEM;
|
||||
|
||||
if (copy_from_user(buffer, userbuf, nbytes)) {
|
||||
@@ -1256,6 +1298,9 @@ static ssize_t cpuset_common_file_write(struct cgroup *cont,
|
||||
case FILE_SCHED_LOAD_BALANCE:
|
||||
retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, buffer);
|
||||
break;
|
||||
case FILE_SCHED_RELAX_DOMAIN_LEVEL:
|
||||
retval = update_relax_domain_level(cs, buffer);
|
||||
break;
|
||||
case FILE_MEMORY_MIGRATE:
|
||||
retval = update_flag(CS_MEMORY_MIGRATE, cs, buffer);
|
||||
break;
|
||||
@@ -1354,6 +1399,9 @@ static ssize_t cpuset_common_file_read(struct cgroup *cont,
|
||||
case FILE_SCHED_LOAD_BALANCE:
|
||||
*s++ = is_sched_load_balance(cs) ? '1' : '0';
|
||||
break;
|
||||
case FILE_SCHED_RELAX_DOMAIN_LEVEL:
|
||||
s += sprintf(s, "%d", cs->relax_domain_level);
|
||||
break;
|
||||
case FILE_MEMORY_MIGRATE:
|
||||
*s++ = is_memory_migrate(cs) ? '1' : '0';
|
||||
break;
|
||||
@@ -1424,6 +1472,13 @@ static struct cftype cft_sched_load_balance = {
|
||||
.private = FILE_SCHED_LOAD_BALANCE,
|
||||
};
|
||||
|
||||
static struct cftype cft_sched_relax_domain_level = {
|
||||
.name = "sched_relax_domain_level",
|
||||
.read = cpuset_common_file_read,
|
||||
.write = cpuset_common_file_write,
|
||||
.private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
|
||||
};
|
||||
|
||||
static struct cftype cft_memory_migrate = {
|
||||
.name = "memory_migrate",
|
||||
.read = cpuset_common_file_read,
|
||||
@@ -1475,6 +1530,9 @@ static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
|
||||
return err;
|
||||
if ((err = cgroup_add_file(cont, ss, &cft_sched_load_balance)) < 0)
|
||||
return err;
|
||||
if ((err = cgroup_add_file(cont, ss,
|
||||
&cft_sched_relax_domain_level)) < 0)
|
||||
return err;
|
||||
if ((err = cgroup_add_file(cont, ss, &cft_memory_pressure)) < 0)
|
||||
return err;
|
||||
if ((err = cgroup_add_file(cont, ss, &cft_spread_page)) < 0)
|
||||
@@ -1555,10 +1613,11 @@ static struct cgroup_subsys_state *cpuset_create(
|
||||
if (is_spread_slab(parent))
|
||||
set_bit(CS_SPREAD_SLAB, &cs->flags);
|
||||
set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
|
||||
cs->cpus_allowed = CPU_MASK_NONE;
|
||||
cs->mems_allowed = NODE_MASK_NONE;
|
||||
cpus_clear(cs->cpus_allowed);
|
||||
nodes_clear(cs->mems_allowed);
|
||||
cs->mems_generation = cpuset_mems_generation++;
|
||||
fmeter_init(&cs->fmeter);
|
||||
cs->relax_domain_level = -1;
|
||||
|
||||
cs->parent = parent;
|
||||
number_of_cpusets++;
|
||||
@@ -1625,12 +1684,13 @@ int __init cpuset_init(void)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
top_cpuset.cpus_allowed = CPU_MASK_ALL;
|
||||
top_cpuset.mems_allowed = NODE_MASK_ALL;
|
||||
cpus_setall(top_cpuset.cpus_allowed);
|
||||
nodes_setall(top_cpuset.mems_allowed);
|
||||
|
||||
fmeter_init(&top_cpuset.fmeter);
|
||||
top_cpuset.mems_generation = cpuset_mems_generation++;
|
||||
set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
|
||||
top_cpuset.relax_domain_level = -1;
|
||||
|
||||
err = register_filesystem(&cpuset_fs_type);
|
||||
if (err < 0)
|
||||
@@ -1844,6 +1904,7 @@ void __init cpuset_init_smp(void)
|
||||
|
||||
* cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
|
||||
* @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
|
||||
* @pmask: pointer to cpumask_t variable to receive cpus_allowed set.
|
||||
*
|
||||
* Description: Returns the cpumask_t cpus_allowed of the cpuset
|
||||
* attached to the specified @tsk. Guaranteed to return some non-empty
|
||||
@@ -1851,35 +1912,27 @@ void __init cpuset_init_smp(void)
|
||||
* tasks cpuset.
|
||||
**/
|
||||
|
||||
cpumask_t cpuset_cpus_allowed(struct task_struct *tsk)
|
||||
void cpuset_cpus_allowed(struct task_struct *tsk, cpumask_t *pmask)
|
||||
{
|
||||
cpumask_t mask;
|
||||
|
||||
mutex_lock(&callback_mutex);
|
||||
mask = cpuset_cpus_allowed_locked(tsk);
|
||||
cpuset_cpus_allowed_locked(tsk, pmask);
|
||||
mutex_unlock(&callback_mutex);
|
||||
|
||||
return mask;
|
||||
}
|
||||
|
||||
/**
|
||||
* cpuset_cpus_allowed_locked - return cpus_allowed mask from a tasks cpuset.
|
||||
* Must be called with callback_mutex held.
|
||||
**/
|
||||
cpumask_t cpuset_cpus_allowed_locked(struct task_struct *tsk)
|
||||
void cpuset_cpus_allowed_locked(struct task_struct *tsk, cpumask_t *pmask)
|
||||
{
|
||||
cpumask_t mask;
|
||||
|
||||
task_lock(tsk);
|
||||
guarantee_online_cpus(task_cs(tsk), &mask);
|
||||
guarantee_online_cpus(task_cs(tsk), pmask);
|
||||
task_unlock(tsk);
|
||||
|
||||
return mask;
|
||||
}
|
||||
|
||||
void cpuset_init_current_mems_allowed(void)
|
||||
{
|
||||
current->mems_allowed = NODE_MASK_ALL;
|
||||
nodes_setall(current->mems_allowed);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1906,22 +1959,14 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
|
||||
}
|
||||
|
||||
/**
|
||||
* cpuset_zonelist_valid_mems_allowed - check zonelist vs. curremt mems_allowed
|
||||
* @zl: the zonelist to be checked
|
||||
* cpuset_nodemask_valid_mems_allowed - check nodemask vs. curremt mems_allowed
|
||||
* @nodemask: the nodemask to be checked
|
||||
*
|
||||
* Are any of the nodes on zonelist zl allowed in current->mems_allowed?
|
||||
* Are any of the nodes in the nodemask allowed in current->mems_allowed?
|
||||
*/
|
||||
int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
|
||||
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; zl->zones[i]; i++) {
|
||||
int nid = zone_to_nid(zl->zones[i]);
|
||||
|
||||
if (node_isset(nid, current->mems_allowed))
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
return nodes_intersects(*nodemask, current->mems_allowed);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -2261,8 +2306,16 @@ void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
|
||||
m->count += cpumask_scnprintf(m->buf + m->count, m->size - m->count,
|
||||
task->cpus_allowed);
|
||||
seq_printf(m, "\n");
|
||||
seq_printf(m, "Cpus_allowed_list:\t");
|
||||
m->count += cpulist_scnprintf(m->buf + m->count, m->size - m->count,
|
||||
task->cpus_allowed);
|
||||
seq_printf(m, "\n");
|
||||
seq_printf(m, "Mems_allowed:\t");
|
||||
m->count += nodemask_scnprintf(m->buf + m->count, m->size - m->count,
|
||||
task->mems_allowed);
|
||||
seq_printf(m, "\n");
|
||||
seq_printf(m, "Mems_allowed_list:\t");
|
||||
m->count += nodelist_scnprintf(m->buf + m->count, m->size - m->count,
|
||||
task->mems_allowed);
|
||||
seq_printf(m, "\n");
|
||||
}
|
||||
|
@@ -507,10 +507,9 @@ void put_files_struct(struct files_struct *files)
|
||||
}
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(put_files_struct);
|
||||
|
||||
void reset_files_struct(struct task_struct *tsk, struct files_struct *files)
|
||||
void reset_files_struct(struct files_struct *files)
|
||||
{
|
||||
struct task_struct *tsk = current;
|
||||
struct files_struct *old;
|
||||
|
||||
old = tsk->files;
|
||||
@@ -519,9 +518,8 @@ void reset_files_struct(struct task_struct *tsk, struct files_struct *files)
|
||||
task_unlock(tsk);
|
||||
put_files_struct(old);
|
||||
}
|
||||
EXPORT_SYMBOL(reset_files_struct);
|
||||
|
||||
static void __exit_files(struct task_struct *tsk)
|
||||
void exit_files(struct task_struct *tsk)
|
||||
{
|
||||
struct files_struct * files = tsk->files;
|
||||
|
||||
@@ -533,12 +531,7 @@ static void __exit_files(struct task_struct *tsk)
|
||||
}
|
||||
}
|
||||
|
||||
void exit_files(struct task_struct *tsk)
|
||||
{
|
||||
__exit_files(tsk);
|
||||
}
|
||||
|
||||
static void __put_fs_struct(struct fs_struct *fs)
|
||||
void put_fs_struct(struct fs_struct *fs)
|
||||
{
|
||||
/* No need to hold fs->lock if we are killing it */
|
||||
if (atomic_dec_and_test(&fs->count)) {
|
||||
@@ -550,12 +543,7 @@ static void __put_fs_struct(struct fs_struct *fs)
|
||||
}
|
||||
}
|
||||
|
||||
void put_fs_struct(struct fs_struct *fs)
|
||||
{
|
||||
__put_fs_struct(fs);
|
||||
}
|
||||
|
||||
static void __exit_fs(struct task_struct *tsk)
|
||||
void exit_fs(struct task_struct *tsk)
|
||||
{
|
||||
struct fs_struct * fs = tsk->fs;
|
||||
|
||||
@@ -563,15 +551,10 @@ static void __exit_fs(struct task_struct *tsk)
|
||||
task_lock(tsk);
|
||||
tsk->fs = NULL;
|
||||
task_unlock(tsk);
|
||||
__put_fs_struct(fs);
|
||||
put_fs_struct(fs);
|
||||
}
|
||||
}
|
||||
|
||||
void exit_fs(struct task_struct *tsk)
|
||||
{
|
||||
__exit_fs(tsk);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL_GPL(exit_fs);
|
||||
|
||||
/*
|
||||
@@ -967,8 +950,8 @@ NORET_TYPE void do_exit(long code)
|
||||
if (group_dead)
|
||||
acct_process();
|
||||
exit_sem(tsk);
|
||||
__exit_files(tsk);
|
||||
__exit_fs(tsk);
|
||||
exit_files(tsk);
|
||||
exit_fs(tsk);
|
||||
check_stack_usage();
|
||||
exit_thread();
|
||||
cgroup_exit(tsk, 1);
|
||||
@@ -984,7 +967,7 @@ NORET_TYPE void do_exit(long code)
|
||||
proc_exit_connector(tsk);
|
||||
exit_notify(tsk, group_dead);
|
||||
#ifdef CONFIG_NUMA
|
||||
mpol_free(tsk->mempolicy);
|
||||
mpol_put(tsk->mempolicy);
|
||||
tsk->mempolicy = NULL;
|
||||
#endif
|
||||
#ifdef CONFIG_FUTEX
|
||||
|
101  kernel/fork.c
@@ -132,6 +132,14 @@ void __put_task_struct(struct task_struct *tsk)
|
||||
free_task(tsk);
|
||||
}
|
||||
|
||||
/*
|
||||
* macro override instead of weak attribute alias, to workaround
|
||||
* gcc 4.1.0 and 4.1.1 bugs with weak attribute and empty functions.
|
||||
*/
|
||||
#ifndef arch_task_cache_init
|
||||
#define arch_task_cache_init()
|
||||
#endif
|
||||
|
||||
void __init fork_init(unsigned long mempages)
|
||||
{
|
||||
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
|
||||
@@ -144,6 +152,9 @@ void __init fork_init(unsigned long mempages)
|
||||
ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL);
|
||||
#endif
|
||||
|
||||
/* do the arch specific task caches init */
|
||||
arch_task_cache_init();
|
||||
|
||||
/*
|
||||
* The default maximum number of threads is set to a safe
|
||||
* value: the thread structures can take up at most half
|
||||
@@ -163,6 +174,13 @@ void __init fork_init(unsigned long mempages)
|
||||
init_task.signal->rlim[RLIMIT_NPROC];
|
||||
}
|
||||
|
||||
int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
|
||||
struct task_struct *src)
|
||||
{
|
||||
*dst = *src;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct task_struct *dup_task_struct(struct task_struct *orig)
|
||||
{
|
||||
struct task_struct *tsk;
|
||||
@@ -181,15 +199,15 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
*tsk = *orig;
|
||||
err = arch_dup_task_struct(tsk, orig);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
tsk->stack = ti;
|
||||
|
||||
err = prop_local_init_single(&tsk->dirties);
|
||||
if (err) {
|
||||
free_thread_info(ti);
|
||||
free_task_struct(tsk);
|
||||
return NULL;
|
||||
}
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
setup_thread_stack(tsk, orig);
|
||||
|
||||
@@ -205,6 +223,11 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
|
||||
#endif
|
||||
tsk->splice_pipe = NULL;
|
||||
return tsk;
|
||||
|
||||
out:
|
||||
free_thread_info(ti);
|
||||
free_task_struct(tsk);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
@@ -256,7 +279,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
|
||||
if (!tmp)
|
||||
goto fail_nomem;
|
||||
*tmp = *mpnt;
|
||||
pol = mpol_copy(vma_policy(mpnt));
|
||||
pol = mpol_dup(vma_policy(mpnt));
|
||||
retval = PTR_ERR(pol);
|
||||
if (IS_ERR(pol))
|
||||
goto fail_nomem_policy;
|
||||
@@ -498,7 +521,7 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
|
||||
* Allocate a new mm structure and copy contents from the
|
||||
* mm structure of the passed in task structure.
|
||||
*/
|
||||
static struct mm_struct *dup_mm(struct task_struct *tsk)
|
||||
struct mm_struct *dup_mm(struct task_struct *tsk)
|
||||
{
|
||||
struct mm_struct *mm, *oldmm = current->mm;
|
||||
int err;
|
||||
@@ -782,12 +805,6 @@ static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* Note: we may be using current for both targets (See exec.c)
|
||||
* This works because we cache current->files (old) as oldf. Don't
|
||||
* break this.
|
||||
*/
|
||||
tsk->files = NULL;
|
||||
newf = dup_fd(oldf, &error);
|
||||
if (!newf)
|
||||
goto out;
|
||||
@@ -823,34 +840,6 @@ static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Helper to unshare the files of the current task.
|
||||
* We don't want to expose copy_files internals to
|
||||
* the exec layer of the kernel.
|
||||
*/
|
||||
|
||||
int unshare_files(void)
|
||||
{
|
||||
struct files_struct *files = current->files;
|
||||
int rc;
|
||||
|
||||
BUG_ON(!files);
|
||||
|
||||
/* This can race but the race causes us to copy when we don't
|
||||
need to and drop the copy */
|
||||
if(atomic_read(&files->count) == 1)
|
||||
{
|
||||
atomic_inc(&files->count);
|
||||
return 0;
|
||||
}
|
||||
rc = copy_files(0, current);
|
||||
if(rc)
|
||||
current->files = files;
|
||||
return rc;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(unshare_files);
|
||||
|
||||
static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
|
||||
{
|
||||
struct sighand_struct *sig;
|
||||
@@ -1127,7 +1116,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
|
||||
p->audit_context = NULL;
|
||||
cgroup_fork(p);
|
||||
#ifdef CONFIG_NUMA
|
||||
p->mempolicy = mpol_copy(p->mempolicy);
|
||||
p->mempolicy = mpol_dup(p->mempolicy);
|
||||
if (IS_ERR(p->mempolicy)) {
|
||||
retval = PTR_ERR(p->mempolicy);
|
||||
p->mempolicy = NULL;
|
||||
@@ -1385,7 +1374,7 @@ bad_fork_cleanup_security:
|
||||
security_task_free(p);
|
||||
bad_fork_cleanup_policy:
|
||||
#ifdef CONFIG_NUMA
|
||||
mpol_free(p->mempolicy);
|
||||
mpol_put(p->mempolicy);
|
||||
bad_fork_cleanup_cgroup:
|
||||
#endif
|
||||
cgroup_exit(p, cgroup_callbacks_done);
|
||||
@@ -1788,3 +1777,27 @@ bad_unshare_cleanup_thread:
|
||||
bad_unshare_out:
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
|
||||
* Helper to unshare the files of the current task.
|
||||
* We don't want to expose copy_files internals to
|
||||
* the exec layer of the kernel.
|
||||
*/
|
||||
|
||||
int unshare_files(struct files_struct **displaced)
|
||||
{
|
||||
struct task_struct *task = current;
|
||||
struct files_struct *copy = NULL;
|
||||
int error;
|
||||
|
||||
error = unshare_fd(CLONE_FILES, ©);
|
||||
if (error || !copy) {
|
||||
*displaced = NULL;
|
||||
return error;
|
||||
}
|
||||
*displaced = task->files;
|
||||
task_lock(task);
|
||||
task->files = copy;
|
||||
task_unlock(task);
|
||||
return 0;
|
||||
}
|
||||
|
129  kernel/hrtimer.c
@@ -590,7 +590,6 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
|
||||
list_add_tail(&timer->cb_entry,
|
||||
&base->cpu_base->cb_pending);
|
||||
timer->state = HRTIMER_STATE_PENDING;
|
||||
raise_softirq(HRTIMER_SOFTIRQ);
|
||||
return 1;
|
||||
default:
|
||||
BUG();
|
||||
@@ -633,6 +632,11 @@ static int hrtimer_switch_to_hres(void)
|
||||
return 1;
|
||||
}
|
||||
|
||||
static inline void hrtimer_raise_softirq(void)
|
||||
{
|
||||
raise_softirq(HRTIMER_SOFTIRQ);
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
static inline int hrtimer_hres_active(void) { return 0; }
|
||||
@@ -651,6 +655,7 @@ static inline int hrtimer_reprogram(struct hrtimer *timer,
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline void hrtimer_raise_softirq(void) { }
|
||||
|
||||
#endif /* CONFIG_HIGH_RES_TIMERS */
|
||||
|
||||
@@ -850,7 +855,7 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
|
||||
{
|
||||
struct hrtimer_clock_base *base, *new_base;
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
int ret, raise;
|
||||
|
||||
base = lock_hrtimer_base(timer, &flags);
|
||||
|
||||
@@ -884,8 +889,18 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
|
||||
enqueue_hrtimer(timer, new_base,
|
||||
new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
|
||||
|
||||
/*
|
||||
* The timer may be expired and moved to the cb_pending
|
||||
* list. We can not raise the softirq with base lock held due
|
||||
* to a possible deadlock with runqueue lock.
|
||||
*/
|
||||
raise = timer->state == HRTIMER_STATE_PENDING;
|
||||
|
||||
unlock_hrtimer_base(timer, &flags);
|
||||
|
||||
if (raise)
|
||||
hrtimer_raise_softirq();
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hrtimer_start);
|
||||
@@ -1080,8 +1095,19 @@ static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
|
||||
* If the timer was rearmed on another CPU, reprogram
|
||||
* the event device.
|
||||
*/
|
||||
if (timer->base->first == &timer->node)
|
||||
hrtimer_reprogram(timer, timer->base);
|
||||
struct hrtimer_clock_base *base = timer->base;
|
||||
|
||||
if (base->first == &timer->node &&
|
||||
hrtimer_reprogram(timer, base)) {
|
||||
/*
|
||||
* Timer is expired. Thus move it from tree to
|
||||
* pending list again.
|
||||
*/
|
||||
__remove_hrtimer(timer, base,
|
||||
HRTIMER_STATE_PENDING, 0);
|
||||
list_add_tail(&timer->cb_entry,
|
||||
&base->cpu_base->cb_pending);
|
||||
}
|
||||
}
|
||||
}
|
||||
spin_unlock_irq(&cpu_base->lock);
|
||||
@@ -1238,51 +1264,50 @@ void hrtimer_run_pending(void)
|
||||
/*
|
||||
* Called from hardirq context every jiffy
|
||||
*/
|
||||
static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
|
||||
int index)
|
||||
{
|
||||
struct rb_node *node;
|
||||
struct hrtimer_clock_base *base = &cpu_base->clock_base[index];
|
||||
|
||||
if (!base->first)
|
||||
return;
|
||||
|
||||
if (base->get_softirq_time)
|
||||
base->softirq_time = base->get_softirq_time();
|
||||
|
||||
spin_lock(&cpu_base->lock);
|
||||
|
||||
while ((node = base->first)) {
|
||||
struct hrtimer *timer;
|
||||
|
||||
timer = rb_entry(node, struct hrtimer, node);
|
||||
if (base->softirq_time.tv64 <= timer->expires.tv64)
|
||||
break;
|
||||
|
||||
if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
|
||||
__remove_hrtimer(timer, base, HRTIMER_STATE_PENDING, 0);
|
||||
list_add_tail(&timer->cb_entry,
|
||||
&base->cpu_base->cb_pending);
|
||||
continue;
|
||||
}
|
||||
|
||||
__run_hrtimer(timer);
|
||||
}
|
||||
spin_unlock(&cpu_base->lock);
|
||||
}
|
||||
|
||||
void hrtimer_run_queues(void)
|
||||
{
|
||||
struct rb_node *node;
|
||||
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
|
||||
int i;
|
||||
struct hrtimer_clock_base *base;
|
||||
int index, gettime = 1;
|
||||
|
||||
if (hrtimer_hres_active())
|
||||
return;
|
||||
|
||||
hrtimer_get_softirq_time(cpu_base);
|
||||
for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
|
||||
base = &cpu_base->clock_base[index];
|
||||
|
||||
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
|
||||
run_hrtimer_queue(cpu_base, i);
|
||||
if (!base->first)
|
||||
continue;
|
||||
|
||||
if (base->get_softirq_time)
|
||||
base->softirq_time = base->get_softirq_time();
|
||||
else if (gettime) {
|
||||
hrtimer_get_softirq_time(cpu_base);
|
||||
gettime = 0;
|
||||
}
|
||||
|
||||
spin_lock(&cpu_base->lock);
|
||||
|
||||
while ((node = base->first)) {
|
||||
struct hrtimer *timer;
|
||||
|
||||
timer = rb_entry(node, struct hrtimer, node);
|
||||
if (base->softirq_time.tv64 <= timer->expires.tv64)
|
||||
break;
|
||||
|
||||
if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
|
||||
__remove_hrtimer(timer, base,
|
||||
HRTIMER_STATE_PENDING, 0);
|
||||
list_add_tail(&timer->cb_entry,
|
||||
&base->cpu_base->cb_pending);
|
||||
continue;
|
||||
}
|
||||
|
||||
__run_hrtimer(timer);
|
||||
}
|
||||
spin_unlock(&cpu_base->lock);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1354,13 +1379,13 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
|
||||
struct hrtimer_sleeper t;
|
||||
struct timespec __user *rmtp;
|
||||
|
||||
hrtimer_init(&t.timer, restart->arg0, HRTIMER_MODE_ABS);
|
||||
t.timer.expires.tv64 = ((u64)restart->arg3 << 32) | (u64) restart->arg2;
|
||||
hrtimer_init(&t.timer, restart->nanosleep.index, HRTIMER_MODE_ABS);
|
||||
t.timer.expires.tv64 = restart->nanosleep.expires;
|
||||
|
||||
if (do_nanosleep(&t, HRTIMER_MODE_ABS))
|
||||
return 0;
|
||||
|
||||
rmtp = (struct timespec __user *)restart->arg1;
|
||||
rmtp = restart->nanosleep.rmtp;
|
||||
if (rmtp) {
|
||||
int ret = update_rmtp(&t.timer, rmtp);
|
||||
if (ret <= 0)
|
||||
@@ -1394,10 +1419,9 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
|
||||
|
||||
restart = ¤t_thread_info()->restart_block;
|
||||
restart->fn = hrtimer_nanosleep_restart;
|
||||
restart->arg0 = (unsigned long) t.timer.base->index;
|
||||
restart->arg1 = (unsigned long) rmtp;
|
||||
restart->arg2 = t.timer.expires.tv64 & 0xFFFFFFFF;
|
||||
restart->arg3 = t.timer.expires.tv64 >> 32;
|
||||
restart->nanosleep.index = t.timer.base->index;
|
||||
restart->nanosleep.rmtp = rmtp;
|
||||
restart->nanosleep.expires = t.timer.expires.tv64;
|
||||
|
||||
return -ERESTART_RESTARTBLOCK;
|
||||
}
|
||||
@@ -1425,7 +1449,6 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
|
||||
int i;
|
||||
|
||||
spin_lock_init(&cpu_base->lock);
|
||||
lockdep_set_class(&cpu_base->lock, &cpu_base->lock_key);
|
||||
|
||||
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
|
||||
cpu_base->clock_base[i].cpu_base = cpu_base;
|
||||
@@ -1466,16 +1489,16 @@ static void migrate_hrtimers(int cpu)
|
||||
tick_cancel_sched_timer(cpu);
|
||||
|
||||
local_irq_disable();
|
||||
double_spin_lock(&new_base->lock, &old_base->lock,
|
||||
smp_processor_id() < cpu);
|
||||
spin_lock(&new_base->lock);
|
||||
spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
|
||||
|
||||
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
|
||||
migrate_hrtimer_list(&old_base->clock_base[i],
|
||||
&new_base->clock_base[i]);
|
||||
}
|
||||
|
||||
double_spin_unlock(&new_base->lock, &old_base->lock,
|
||||
smp_processor_id() < cpu);
|
||||
spin_unlock(&old_base->lock);
|
||||
spin_unlock(&new_base->lock);
|
||||
local_irq_enable();
|
||||
put_cpu_var(hrtimer_bases);
|
||||
}
|
||||
|
@@ -47,7 +47,7 @@ void dynamic_irq_init(unsigned int irq)
desc->irq_count = 0;
desc->irqs_unhandled = 0;
#ifdef CONFIG_SMP
desc->affinity = CPU_MASK_ALL;
cpus_setall(desc->affinity);
#endif
spin_unlock_irqrestore(&desc->lock, flags);
}
@@ -29,7 +29,6 @@
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/semaphore.h>
#include <asm/sections.h>

/* Per cpu memory for storing cpu states in case of system crash. */
@@ -1406,6 +1405,9 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
VMCOREINFO_NUMBER(NR_FREE_PAGES);
VMCOREINFO_NUMBER(PG_lru);
VMCOREINFO_NUMBER(PG_private);
VMCOREINFO_NUMBER(PG_swapcache);

arch_crash_save_vmcoreinfo();
1700  kernel/kgdb.c  (normal file)
(File diff too large to display.)
@@ -165,7 +165,7 @@ static int ____call_usermodehelper(void *data)
}

/* We can run anywhere, unlike our parent keventd(). */
set_cpus_allowed(current, CPU_MASK_ALL);
set_cpus_allowed_ptr(current, CPU_MASK_ALL_PTR);

/*
 * Our parent is keventd, which runs with elevated scheduling priority.
357  kernel/kprobes.c
@@ -72,6 +72,18 @@ DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
|
||||
DEFINE_SPINLOCK(kretprobe_lock); /* Protects kretprobe_inst_table */
|
||||
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
|
||||
|
||||
/*
|
||||
* Normally, functions that we'd want to prohibit kprobes in, are marked
|
||||
* __kprobes. But, there are cases where such functions already belong to
|
||||
* a different section (__sched for preempt_schedule)
|
||||
*
|
||||
* For such cases, we now have a blacklist
|
||||
*/
|
||||
struct kprobe_blackpoint kprobe_blacklist[] = {
|
||||
{"preempt_schedule",},
|
||||
{NULL} /* Terminator */
|
||||
};
|
||||
|
||||
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
|
||||
/*
|
||||
* kprobe->ainsn.insn points to the copy of the instruction to be
|
||||
@@ -417,6 +429,21 @@ static inline void free_rp_inst(struct kretprobe *rp)
|
||||
}
|
||||
}
|
||||
|
||||
static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct kretprobe_instance *ri;
|
||||
struct hlist_node *pos, *next;
|
||||
/* No race here */
|
||||
spin_lock_irqsave(&kretprobe_lock, flags);
|
||||
hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) {
|
||||
ri->rp = NULL;
|
||||
hlist_del(&ri->uflist);
|
||||
}
|
||||
spin_unlock_irqrestore(&kretprobe_lock, flags);
|
||||
free_rp_inst(rp);
|
||||
}
|
||||
|
||||
/*
|
||||
* Keep all fields in the kprobe consistent
|
||||
*/
|
||||
@@ -492,9 +519,22 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
|
||||
|
||||
static int __kprobes in_kprobes_functions(unsigned long addr)
|
||||
{
|
||||
struct kprobe_blackpoint *kb;
|
||||
|
||||
if (addr >= (unsigned long)__kprobes_text_start &&
|
||||
addr < (unsigned long)__kprobes_text_end)
|
||||
return -EINVAL;
|
||||
/*
|
||||
* If there exists a kprobe_blacklist, verify and
|
||||
* fail any probe registration in the prohibited area
|
||||
*/
|
||||
for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
|
||||
if (kb->start_addr) {
|
||||
if (addr >= kb->start_addr &&
|
||||
addr < (kb->start_addr + kb->range))
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -555,6 +595,7 @@ static int __kprobes __register_kprobe(struct kprobe *p,
|
||||
}
|
||||
|
||||
p->nmissed = 0;
|
||||
INIT_LIST_HEAD(&p->list);
|
||||
mutex_lock(&kprobe_mutex);
|
||||
old_p = get_kprobe(p->addr);
|
||||
if (old_p) {
|
||||
@@ -581,35 +622,28 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int __kprobes register_kprobe(struct kprobe *p)
|
||||
/*
|
||||
* Unregister a kprobe without a scheduler synchronization.
|
||||
*/
|
||||
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
|
||||
{
|
||||
return __register_kprobe(p, (unsigned long)__builtin_return_address(0));
|
||||
}
|
||||
|
||||
void __kprobes unregister_kprobe(struct kprobe *p)
|
||||
{
|
||||
struct module *mod;
|
||||
struct kprobe *old_p, *list_p;
|
||||
int cleanup_p;
|
||||
|
||||
mutex_lock(&kprobe_mutex);
|
||||
old_p = get_kprobe(p->addr);
|
||||
if (unlikely(!old_p)) {
|
||||
mutex_unlock(&kprobe_mutex);
|
||||
return;
|
||||
}
|
||||
if (unlikely(!old_p))
|
||||
return -EINVAL;
|
||||
|
||||
if (p != old_p) {
|
||||
list_for_each_entry_rcu(list_p, &old_p->list, list)
|
||||
if (list_p == p)
|
||||
/* kprobe p is a valid probe */
|
||||
goto valid_p;
|
||||
mutex_unlock(&kprobe_mutex);
|
||||
return;
|
||||
return -EINVAL;
|
||||
}
|
||||
valid_p:
|
||||
if (old_p == p ||
|
||||
(old_p->pre_handler == aggr_pre_handler &&
|
||||
p->list.next == &old_p->list && p->list.prev == &old_p->list)) {
|
||||
list_is_singular(&old_p->list))) {
|
||||
/*
|
||||
* Only probe on the hash list. Disarm only if kprobes are
|
||||
* enabled - otherwise, the breakpoint would already have
|
||||
@@ -618,45 +652,99 @@ valid_p:
|
||||
if (kprobe_enabled)
|
||||
arch_disarm_kprobe(p);
|
||||
hlist_del_rcu(&old_p->hlist);
|
||||
cleanup_p = 1;
|
||||
} else {
|
||||
if (p->break_handler)
|
||||
old_p->break_handler = NULL;
|
||||
if (p->post_handler) {
|
||||
list_for_each_entry_rcu(list_p, &old_p->list, list) {
|
||||
if ((list_p != p) && (list_p->post_handler))
|
||||
goto noclean;
|
||||
}
|
||||
old_p->post_handler = NULL;
|
||||
}
|
||||
noclean:
|
||||
list_del_rcu(&p->list);
|
||||
cleanup_p = 0;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
mutex_unlock(&kprobe_mutex);
|
||||
static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
|
||||
{
|
||||
struct module *mod;
|
||||
struct kprobe *old_p;
|
||||
|
||||
synchronize_sched();
|
||||
if (p->mod_refcounted) {
|
||||
mod = module_text_address((unsigned long)p->addr);
|
||||
if (mod)
|
||||
module_put(mod);
|
||||
}
|
||||
|
||||
if (cleanup_p) {
|
||||
if (p != old_p) {
|
||||
list_del_rcu(&p->list);
|
||||
if (list_empty(&p->list) || list_is_singular(&p->list)) {
|
||||
if (!list_empty(&p->list)) {
|
||||
/* "p" is the last child of an aggr_kprobe */
|
||||
old_p = list_entry(p->list.next, struct kprobe, list);
|
||||
list_del(&p->list);
|
||||
kfree(old_p);
|
||||
}
|
||||
arch_remove_kprobe(p);
|
||||
} else {
|
||||
mutex_lock(&kprobe_mutex);
|
||||
if (p->break_handler)
|
||||
old_p->break_handler = NULL;
|
||||
if (p->post_handler){
|
||||
list_for_each_entry_rcu(list_p, &old_p->list, list){
|
||||
if (list_p->post_handler){
|
||||
cleanup_p = 2;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (cleanup_p == 0)
|
||||
old_p->post_handler = NULL;
|
||||
}
|
||||
mutex_unlock(&kprobe_mutex);
|
||||
}
|
||||
}
|
||||
|
||||
static int __register_kprobes(struct kprobe **kps, int num,
|
||||
unsigned long called_from)
|
||||
{
|
||||
int i, ret = 0;
|
||||
|
||||
if (num <= 0)
|
||||
return -EINVAL;
|
||||
for (i = 0; i < num; i++) {
|
||||
ret = __register_kprobe(kps[i], called_from);
|
||||
if (ret < 0 && i > 0) {
|
||||
unregister_kprobes(kps, i);
|
||||
break;
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Registration and unregistration functions for kprobe.
|
||||
*/
|
||||
int __kprobes register_kprobe(struct kprobe *p)
|
||||
{
|
||||
return __register_kprobes(&p, 1,
|
||||
(unsigned long)__builtin_return_address(0));
|
||||
}
|
||||
|
||||
void __kprobes unregister_kprobe(struct kprobe *p)
|
||||
{
|
||||
unregister_kprobes(&p, 1);
|
||||
}
|
||||
|
||||
int __kprobes register_kprobes(struct kprobe **kps, int num)
|
||||
{
|
||||
return __register_kprobes(kps, num,
|
||||
(unsigned long)__builtin_return_address(0));
|
||||
}
|
||||
|
||||
void __kprobes unregister_kprobes(struct kprobe **kps, int num)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (num <= 0)
|
||||
return;
|
||||
mutex_lock(&kprobe_mutex);
|
||||
for (i = 0; i < num; i++)
|
||||
if (__unregister_kprobe_top(kps[i]) < 0)
|
||||
kps[i]->addr = NULL;
|
||||
mutex_unlock(&kprobe_mutex);
|
||||
|
||||
synchronize_sched();
|
||||
for (i = 0; i < num; i++)
|
||||
if (kps[i]->addr)
|
||||
__unregister_kprobe_bottom(kps[i]);
|
||||
}
|
||||
|
||||
static struct notifier_block kprobe_exceptions_nb = {
|
||||
.notifier_call = kprobe_exceptions_notify,
|
||||
.priority = 0x7fffffff /* we need to be notified first */
|
||||
@@ -667,24 +755,69 @@ unsigned long __weak arch_deref_entry_point(void *entry)
|
||||
return (unsigned long)entry;
|
||||
}
|
||||
|
||||
static int __register_jprobes(struct jprobe **jps, int num,
|
||||
unsigned long called_from)
|
||||
{
|
||||
struct jprobe *jp;
|
||||
int ret = 0, i;
|
||||
|
||||
if (num <= 0)
|
||||
return -EINVAL;
|
||||
for (i = 0; i < num; i++) {
|
||||
unsigned long addr;
|
||||
jp = jps[i];
|
||||
addr = arch_deref_entry_point(jp->entry);
|
||||
|
||||
if (!kernel_text_address(addr))
|
||||
ret = -EINVAL;
|
||||
else {
|
||||
/* Todo: Verify probepoint is a function entry point */
|
||||
jp->kp.pre_handler = setjmp_pre_handler;
|
||||
jp->kp.break_handler = longjmp_break_handler;
|
||||
ret = __register_kprobe(&jp->kp, called_from);
|
||||
}
|
||||
if (ret < 0 && i > 0) {
|
||||
unregister_jprobes(jps, i);
|
||||
break;
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
int __kprobes register_jprobe(struct jprobe *jp)
|
||||
{
|
||||
unsigned long addr = arch_deref_entry_point(jp->entry);
|
||||
|
||||
if (!kernel_text_address(addr))
|
||||
return -EINVAL;
|
||||
|
||||
/* Todo: Verify probepoint is a function entry point */
|
||||
jp->kp.pre_handler = setjmp_pre_handler;
|
||||
jp->kp.break_handler = longjmp_break_handler;
|
||||
|
||||
return __register_kprobe(&jp->kp,
|
||||
return __register_jprobes(&jp, 1,
|
||||
(unsigned long)__builtin_return_address(0));
|
||||
}
|
||||
|
||||
void __kprobes unregister_jprobe(struct jprobe *jp)
|
||||
{
|
||||
unregister_kprobe(&jp->kp);
|
||||
unregister_jprobes(&jp, 1);
|
||||
}
|
||||
|
||||
int __kprobes register_jprobes(struct jprobe **jps, int num)
|
||||
{
|
||||
return __register_jprobes(jps, num,
|
||||
(unsigned long)__builtin_return_address(0));
|
||||
}
|
||||
|
||||
void __kprobes unregister_jprobes(struct jprobe **jps, int num)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (num <= 0)
|
||||
return;
|
||||
mutex_lock(&kprobe_mutex);
|
||||
for (i = 0; i < num; i++)
|
||||
if (__unregister_kprobe_top(&jps[i]->kp) < 0)
|
||||
jps[i]->kp.addr = NULL;
|
||||
mutex_unlock(&kprobe_mutex);
|
||||
|
||||
synchronize_sched();
|
||||
for (i = 0; i < num; i++) {
|
||||
if (jps[i]->kp.addr)
|
||||
__unregister_kprobe_bottom(&jps[i]->kp);
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_KRETPROBES
|
||||
@@ -725,7 +858,8 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int __kprobes register_kretprobe(struct kretprobe *rp)
|
||||
static int __kprobes __register_kretprobe(struct kretprobe *rp,
|
||||
unsigned long called_from)
|
||||
{
|
||||
int ret = 0;
|
||||
struct kretprobe_instance *inst;
|
||||
@@ -771,46 +905,101 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
|
||||
|
||||
rp->nmissed = 0;
|
||||
/* Establish function entry probe point */
|
||||
if ((ret = __register_kprobe(&rp->kp,
|
||||
(unsigned long)__builtin_return_address(0))) != 0)
|
||||
ret = __register_kprobe(&rp->kp, called_from);
|
||||
if (ret != 0)
|
||||
free_rp_inst(rp);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int __register_kretprobes(struct kretprobe **rps, int num,
|
||||
unsigned long called_from)
|
||||
{
|
||||
int ret = 0, i;
|
||||
|
||||
if (num <= 0)
|
||||
return -EINVAL;
|
||||
for (i = 0; i < num; i++) {
|
||||
ret = __register_kretprobe(rps[i], called_from);
|
||||
if (ret < 0 && i > 0) {
|
||||
unregister_kretprobes(rps, i);
|
||||
break;
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
int __kprobes register_kretprobe(struct kretprobe *rp)
|
||||
{
|
||||
return __register_kretprobes(&rp, 1,
|
||||
(unsigned long)__builtin_return_address(0));
|
||||
}
|
||||
|
||||
void __kprobes unregister_kretprobe(struct kretprobe *rp)
|
||||
{
|
||||
unregister_kretprobes(&rp, 1);
|
||||
}
|
||||
|
||||
int __kprobes register_kretprobes(struct kretprobe **rps, int num)
|
||||
{
|
||||
return __register_kretprobes(rps, num,
|
||||
(unsigned long)__builtin_return_address(0));
|
||||
}
|
||||
|
||||
void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (num <= 0)
|
||||
return;
|
||||
mutex_lock(&kprobe_mutex);
|
||||
for (i = 0; i < num; i++)
|
||||
if (__unregister_kprobe_top(&rps[i]->kp) < 0)
|
||||
rps[i]->kp.addr = NULL;
|
||||
mutex_unlock(&kprobe_mutex);
|
||||
|
||||
synchronize_sched();
|
||||
for (i = 0; i < num; i++) {
|
||||
if (rps[i]->kp.addr) {
|
||||
__unregister_kprobe_bottom(&rps[i]->kp);
|
||||
cleanup_rp_inst(rps[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#else /* CONFIG_KRETPROBES */
|
||||
int __kprobes register_kretprobe(struct kretprobe *rp)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
int __kprobes register_kretprobes(struct kretprobe **rps, int num)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
void __kprobes unregister_kretprobe(struct kretprobe *rp)
|
||||
{
|
||||
}
|
||||
|
||||
void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
|
||||
{
|
||||
}
|
||||
|
||||
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_KRETPROBES */
|
||||
|
||||
void __kprobes unregister_kretprobe(struct kretprobe *rp)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct kretprobe_instance *ri;
|
||||
struct hlist_node *pos, *next;
|
||||
|
||||
unregister_kprobe(&rp->kp);
|
||||
|
||||
/* No race here */
|
||||
spin_lock_irqsave(&kretprobe_lock, flags);
|
||||
hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) {
|
||||
ri->rp = NULL;
|
||||
hlist_del(&ri->uflist);
|
||||
}
|
||||
spin_unlock_irqrestore(&kretprobe_lock, flags);
|
||||
free_rp_inst(rp);
|
||||
}
|
||||
|
||||
static int __init init_kprobes(void)
|
||||
{
|
||||
int i, err = 0;
|
||||
unsigned long offset = 0, size = 0;
|
||||
char *modname, namebuf[128];
|
||||
const char *symbol_name;
|
||||
void *addr;
|
||||
struct kprobe_blackpoint *kb;
|
||||
|
||||
/* FIXME allocate the probe table, currently defined statically */
|
||||
/* initialize all list heads */
|
||||
@@ -819,6 +1008,28 @@ static int __init init_kprobes(void)
|
||||
INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
|
||||
}
|
||||
|
||||
/*
|
||||
* Lookup and populate the kprobe_blacklist.
|
||||
*
|
||||
* Unlike the kretprobe blacklist, we'll need to determine
|
||||
* the range of addresses that belong to the said functions,
|
||||
* since a kprobe need not necessarily be at the beginning
|
||||
* of a function.
|
||||
*/
|
||||
for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
|
||||
kprobe_lookup_name(kb->name, addr);
|
||||
if (!addr)
|
||||
continue;
|
||||
|
||||
kb->start_addr = (unsigned long)addr;
|
||||
symbol_name = kallsyms_lookup(kb->start_addr,
|
||||
&size, &offset, &modname, namebuf);
|
||||
if (!symbol_name)
|
||||
kb->range = 0;
|
||||
else
|
||||
kb->range = size;
|
||||
}
|
||||
|
||||
if (kretprobe_blacklist_size) {
|
||||
/* lookup the function address from its name */
|
||||
for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
|
||||
@@ -1066,8 +1277,12 @@ module_init(init_kprobes);
|
||||
|
||||
EXPORT_SYMBOL_GPL(register_kprobe);
|
||||
EXPORT_SYMBOL_GPL(unregister_kprobe);
|
||||
EXPORT_SYMBOL_GPL(register_kprobes);
|
||||
EXPORT_SYMBOL_GPL(unregister_kprobes);
|
||||
EXPORT_SYMBOL_GPL(register_jprobe);
|
||||
EXPORT_SYMBOL_GPL(unregister_jprobe);
|
||||
EXPORT_SYMBOL_GPL(register_jprobes);
|
||||
EXPORT_SYMBOL_GPL(unregister_jprobes);
|
||||
#ifdef CONFIG_KPROBES
|
||||
EXPORT_SYMBOL_GPL(jprobe_return);
|
||||
#endif
|
||||
@@ -1075,4 +1290,6 @@ EXPORT_SYMBOL_GPL(jprobe_return);
|
||||
#ifdef CONFIG_KPROBES
|
||||
EXPORT_SYMBOL_GPL(register_kretprobe);
|
||||
EXPORT_SYMBOL_GPL(unregister_kretprobe);
|
||||
EXPORT_SYMBOL_GPL(register_kretprobes);
|
||||
EXPORT_SYMBOL_GPL(unregister_kretprobes);
|
||||
#endif
|
||||
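The kprobes hunks above add batch variants (register_kprobes()/unregister_kprobes(), plus the jprobe and kretprobe counterparts) beside the existing single-probe calls; on a partial failure the already-registered probes in the array are unregistered again. A minimal usage sketch built against this tree; the probed symbols and the handler are illustrative only, not part of the patch:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

/* Hypothetical handler; the symbols probed below are only examples. */
static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe hit at %p\n", p->addr);
	return 0;
}

static struct kprobe kp1 = {
	.symbol_name = "do_fork",
	.pre_handler = example_pre_handler,
};
static struct kprobe kp2 = {
	.symbol_name = "do_exit",
	.pre_handler = example_pre_handler,
};
static struct kprobe *probes[] = { &kp1, &kp2 };

static int __init batch_init(void)
{
	/* Registers both probes; on failure the earlier ones are undone. */
	return register_kprobes(probes, ARRAY_SIZE(probes));
}

static void __exit batch_exit(void)
{
	unregister_kprobes(probes, ARRAY_SIZE(probes));
}

module_init(batch_init);
module_exit(batch_exit);
MODULE_LICENSE("GPL");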
|
@@ -13,7 +13,6 @@
|
||||
#include <linux/file.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <asm/semaphore.h>
|
||||
|
||||
#define KTHREAD_NICE_LEVEL (-5)
|
||||
|
||||
@@ -180,6 +179,7 @@ void kthread_bind(struct task_struct *k, unsigned int cpu)
|
||||
wait_task_inactive(k);
|
||||
set_task_cpu(k, cpu);
|
||||
k->cpus_allowed = cpumask_of_cpu(cpu);
|
||||
k->rt.nr_cpus_allowed = 1;
|
||||
}
|
||||
EXPORT_SYMBOL(kthread_bind);
|
||||
|
||||
|
@@ -64,8 +64,8 @@ account_global_scheduler_latency(struct task_struct *tsk, struct latency_record
|
||||
return;
|
||||
|
||||
for (i = 0; i < MAXLR; i++) {
|
||||
int q;
|
||||
int same = 1;
|
||||
int q, same = 1;
|
||||
|
||||
/* Nothing stored: */
|
||||
if (!latency_record[i].backtrace[0]) {
|
||||
if (firstnonnull > i)
|
||||
@@ -73,12 +73,15 @@ account_global_scheduler_latency(struct task_struct *tsk, struct latency_record
|
||||
continue;
|
||||
}
|
||||
for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) {
|
||||
if (latency_record[i].backtrace[q] !=
|
||||
lat->backtrace[q])
|
||||
unsigned long record = lat->backtrace[q];
|
||||
|
||||
if (latency_record[i].backtrace[q] != record) {
|
||||
same = 0;
|
||||
if (same && lat->backtrace[q] == 0)
|
||||
break;
|
||||
if (same && lat->backtrace[q] == ULONG_MAX)
|
||||
}
|
||||
|
||||
/* 0 and ULONG_MAX entries mean end of backtrace: */
|
||||
if (record == 0 || record == ULONG_MAX)
|
||||
break;
|
||||
}
|
||||
if (same) {
|
||||
@@ -143,14 +146,18 @@ account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
|
||||
for (i = 0; i < LT_SAVECOUNT ; i++) {
|
||||
struct latency_record *mylat;
|
||||
int same = 1;
|
||||
|
||||
mylat = &tsk->latency_record[i];
|
||||
for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) {
|
||||
if (mylat->backtrace[q] !=
|
||||
lat.backtrace[q])
|
||||
unsigned long record = lat.backtrace[q];
|
||||
|
||||
if (mylat->backtrace[q] != record) {
|
||||
same = 0;
|
||||
if (same && lat.backtrace[q] == 0)
|
||||
break;
|
||||
if (same && lat.backtrace[q] == ULONG_MAX)
|
||||
}
|
||||
|
||||
/* 0 and ULONG_MAX entries mean end of backtrace: */
|
||||
if (record == 0 || record == ULONG_MAX)
|
||||
break;
|
||||
}
|
||||
if (same) {
|
||||
|
@@ -43,7 +43,6 @@
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/unwind.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/semaphore.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <linux/license.h>
|
||||
#include <asm/sections.h>
|
||||
@@ -664,7 +663,7 @@ static void free_module(struct module *mod);
|
||||
|
||||
static void wait_for_zero_refcount(struct module *mod)
|
||||
{
|
||||
/* Since we might sleep for some time, drop the semaphore first */
|
||||
/* Since we might sleep for some time, release the mutex first */
|
||||
mutex_unlock(&module_mutex);
|
||||
for (;;) {
|
||||
DEBUGP("Looking at refcount...\n");
|
||||
|
@@ -92,7 +92,7 @@ static struct pid_namespace *create_pid_namespace(int level)
|
||||
atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1);
|
||||
|
||||
for (i = 1; i < PIDMAP_ENTRIES; i++) {
|
||||
ns->pidmap[i].page = 0;
|
||||
ns->pidmap[i].page = NULL;
|
||||
atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE);
|
||||
}
|
||||
|
||||
|
@@ -1087,45 +1087,45 @@ static void check_process_timers(struct task_struct *tsk,
|
||||
maxfire = 20;
|
||||
prof_expires = cputime_zero;
|
||||
while (!list_empty(timers)) {
|
||||
struct cpu_timer_list *t = list_first_entry(timers,
|
||||
struct cpu_timer_list *tl = list_first_entry(timers,
|
||||
struct cpu_timer_list,
|
||||
entry);
|
||||
if (!--maxfire || cputime_lt(ptime, t->expires.cpu)) {
|
||||
prof_expires = t->expires.cpu;
|
||||
if (!--maxfire || cputime_lt(ptime, tl->expires.cpu)) {
|
||||
prof_expires = tl->expires.cpu;
|
||||
break;
|
||||
}
|
||||
t->firing = 1;
|
||||
list_move_tail(&t->entry, firing);
|
||||
tl->firing = 1;
|
||||
list_move_tail(&tl->entry, firing);
|
||||
}
|
||||
|
||||
++timers;
|
||||
maxfire = 20;
|
||||
virt_expires = cputime_zero;
|
||||
while (!list_empty(timers)) {
|
||||
struct cpu_timer_list *t = list_first_entry(timers,
|
||||
struct cpu_timer_list *tl = list_first_entry(timers,
|
||||
struct cpu_timer_list,
|
||||
entry);
|
||||
if (!--maxfire || cputime_lt(utime, t->expires.cpu)) {
|
||||
virt_expires = t->expires.cpu;
|
||||
if (!--maxfire || cputime_lt(utime, tl->expires.cpu)) {
|
||||
virt_expires = tl->expires.cpu;
|
||||
break;
|
||||
}
|
||||
t->firing = 1;
|
||||
list_move_tail(&t->entry, firing);
|
||||
tl->firing = 1;
|
||||
list_move_tail(&tl->entry, firing);
|
||||
}
|
||||
|
||||
++timers;
|
||||
maxfire = 20;
|
||||
sched_expires = 0;
|
||||
while (!list_empty(timers)) {
|
||||
struct cpu_timer_list *t = list_first_entry(timers,
|
||||
struct cpu_timer_list *tl = list_first_entry(timers,
|
||||
struct cpu_timer_list,
|
||||
entry);
|
||||
if (!--maxfire || sum_sched_runtime < t->expires.sched) {
|
||||
sched_expires = t->expires.sched;
|
||||
if (!--maxfire || sum_sched_runtime < tl->expires.sched) {
|
||||
sched_expires = tl->expires.sched;
|
||||
break;
|
||||
}
|
||||
t->firing = 1;
|
||||
list_move_tail(&t->entry, firing);
|
||||
tl->firing = 1;
|
||||
list_move_tail(&tl->entry, firing);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@@ -37,7 +37,6 @@
|
||||
#include <linux/mutex.h>
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/semaphore.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/compiler.h>
|
||||
|
@@ -19,16 +19,6 @@ config PM
|
||||
will issue the hlt instruction if nothing is to be done, thereby
|
||||
sending the processor to sleep and saving power.
|
||||
|
||||
config PM_LEGACY
|
||||
bool "Legacy Power Management API (DEPRECATED)"
|
||||
depends on PM
|
||||
default n
|
||||
---help---
|
||||
Support for pm_register() and friends. This old API is obsoleted
|
||||
by the driver model.
|
||||
|
||||
If unsure, say N.
|
||||
|
||||
config PM_DEBUG
|
||||
bool "Power Management Debug Support"
|
||||
depends on PM
|
||||
|
@@ -4,7 +4,6 @@ EXTRA_CFLAGS += -DDEBUG
|
||||
endif
|
||||
|
||||
obj-y := main.o
|
||||
obj-$(CONFIG_PM_LEGACY) += pm.o
|
||||
obj-$(CONFIG_PM_SLEEP) += process.o console.o
|
||||
obj-$(CONFIG_HIBERNATION) += swsusp.o disk.o snapshot.o swap.o user.o
|
||||
|
||||
|
@@ -7,17 +7,39 @@
|
||||
#include <linux/vt_kern.h>
|
||||
#include <linux/kbd_kern.h>
|
||||
#include <linux/console.h>
|
||||
#include <linux/module.h>
|
||||
#include "power.h"
|
||||
|
||||
#if defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
|
||||
#define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1)
|
||||
|
||||
static int orig_fgconsole, orig_kmsg;
|
||||
static int disable_vt_switch;
|
||||
|
||||
/*
|
||||
* Normally during a suspend, we allocate a new console and switch to it.
|
||||
* When we resume, we switch back to the original console. This switch
|
||||
* can be slow, so on systems where the framebuffer can handle restoration
|
||||
* of video registers anyways, there's little point in doing the console
|
||||
* switch. This function allows you to disable it by passing it '0'.
|
||||
*/
|
||||
void pm_set_vt_switch(int do_switch)
|
||||
{
|
||||
acquire_console_sem();
|
||||
disable_vt_switch = !do_switch;
|
||||
release_console_sem();
|
||||
}
|
||||
EXPORT_SYMBOL(pm_set_vt_switch);
|
||||
|
||||
int pm_prepare_console(void)
|
||||
{
|
||||
acquire_console_sem();
|
||||
|
||||
if (disable_vt_switch) {
|
||||
release_console_sem();
|
||||
return 0;
|
||||
}
|
||||
|
||||
orig_fgconsole = fg_console;
|
||||
|
||||
if (vc_allocate(SUSPEND_CONSOLE)) {
|
||||
@@ -50,9 +72,12 @@ int pm_prepare_console(void)
|
||||
void pm_restore_console(void)
|
||||
{
|
||||
acquire_console_sem();
|
||||
if (disable_vt_switch) {
|
||||
release_console_sem();
|
||||
return;
|
||||
}
|
||||
set_console(orig_fgconsole);
|
||||
release_console_sem();
|
||||
kmsg_redirect = orig_kmsg;
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
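The console.c hunk above exports pm_set_vt_switch() so a driver whose framebuffer can restore its own video state may skip the suspend-console switch. A minimal sketch of a caller; the driver name, hooks, and the header assumed to declare the function are hypothetical:

#include <linux/module.h>
#include <linux/pm.h>	/* assumption: pm_set_vt_switch() is declared here */

static int __init examplefb_init(void)
{
	/*
	 * This (hypothetical) framebuffer restores its registers itself on
	 * resume, so the slow VT switch to the suspend console is skipped.
	 */
	pm_set_vt_switch(0);
	return 0;
}

static void __exit examplefb_exit(void)
{
	/* Restore the default behaviour when the driver goes away. */
	pm_set_vt_switch(1);
}

module_init(examplefb_init);
module_exit(examplefb_exit);
MODULE_LICENSE("GPL");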
|
@@ -1,205 +0,0 @@
|
||||
/*
|
||||
* pm.c - Power management interface
|
||||
*
|
||||
* Copyright (C) 2000 Andrew Henroid
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*/
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/pm.h>
|
||||
#include <linux/pm_legacy.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/mutex.h>
|
||||
|
||||
/*
|
||||
* Locking notes:
|
||||
* pm_devs_lock can be a semaphore providing pm ops are not called
|
||||
* from an interrupt handler (already a bad idea so no change here). Each
|
||||
* change must be protected so that an unlink of an entry doesn't clash
|
||||
* with a pm send - which is permitted to sleep in the current architecture
|
||||
*
|
||||
* Module unloads clashing with pm events now work out safely, the module
|
||||
* unload path will block until the event has been sent. It may well block
|
||||
* until a resume but that will be fine.
|
||||
*/
|
||||
|
||||
static DEFINE_MUTEX(pm_devs_lock);
|
||||
static LIST_HEAD(pm_devs);
|
||||
|
||||
/**
|
||||
* pm_register - register a device with power management
|
||||
* @type: device type
|
||||
* @id: device ID
|
||||
* @callback: callback function
|
||||
*
|
||||
* Add a device to the list of devices that wish to be notified about
|
||||
* power management events. A &pm_dev structure is returned on success,
|
||||
* on failure the return is %NULL.
|
||||
*
|
||||
* The callback function will be called in process context and
|
||||
* it may sleep.
|
||||
*/
|
||||
|
||||
struct pm_dev *pm_register(pm_dev_t type,
|
||||
unsigned long id,
|
||||
pm_callback callback)
|
||||
{
|
||||
struct pm_dev *dev = kzalloc(sizeof(struct pm_dev), GFP_KERNEL);
|
||||
if (dev) {
|
||||
dev->type = type;
|
||||
dev->id = id;
|
||||
dev->callback = callback;
|
||||
|
||||
mutex_lock(&pm_devs_lock);
|
||||
list_add(&dev->entry, &pm_devs);
|
||||
mutex_unlock(&pm_devs_lock);
|
||||
}
|
||||
return dev;
|
||||
}
|
||||
|
||||
/**
|
||||
* pm_send - send request to a single device
|
||||
* @dev: device to send to
|
||||
* @rqst: power management request
|
||||
* @data: data for the callback
|
||||
*
|
||||
* Issue a power management request to a given device. The
|
||||
* %PM_SUSPEND and %PM_RESUME events are handled specially. The
|
||||
* data field must hold the intended next state. No call is made
|
||||
* if the state matches.
|
||||
*
|
||||
* BUGS: what stops two power management requests occurring in parallel
|
||||
* and conflicting.
|
||||
*
|
||||
* WARNING: Calling pm_send directly is not generally recommended, in
|
||||
* particular there is no locking against the pm_dev going away. The
|
||||
* caller must maintain all needed locking or have 'inside knowledge'
|
||||
* on the safety. Also remember that this function is not locked against
|
||||
* pm_unregister. This means that you must handle SMP races on callback
|
||||
* execution and unload yourself.
|
||||
*/
|
||||
|
||||
static int pm_send(struct pm_dev *dev, pm_request_t rqst, void *data)
|
||||
{
|
||||
int status = 0;
|
||||
unsigned long prev_state, next_state;
|
||||
|
||||
if (in_interrupt())
|
||||
BUG();
|
||||
|
||||
switch (rqst) {
|
||||
case PM_SUSPEND:
|
||||
case PM_RESUME:
|
||||
prev_state = dev->state;
|
||||
next_state = (unsigned long) data;
|
||||
if (prev_state != next_state) {
|
||||
if (dev->callback)
|
||||
status = (*dev->callback)(dev, rqst, data);
|
||||
if (!status) {
|
||||
dev->state = next_state;
|
||||
dev->prev_state = prev_state;
|
||||
}
|
||||
}
|
||||
else {
|
||||
dev->prev_state = prev_state;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
if (dev->callback)
|
||||
status = (*dev->callback)(dev, rqst, data);
|
||||
break;
|
||||
}
|
||||
return status;
|
||||
}
|
||||
|
||||
/*
|
||||
* Undo incomplete request
|
||||
*/
|
||||
static void pm_undo_all(struct pm_dev *last)
|
||||
{
|
||||
struct list_head *entry = last->entry.prev;
|
||||
while (entry != &pm_devs) {
|
||||
struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
|
||||
if (dev->state != dev->prev_state) {
|
||||
/* previous state was zero (running) resume or
|
||||
* previous state was non-zero (suspended) suspend
|
||||
*/
|
||||
pm_request_t undo = (dev->prev_state
|
||||
? PM_SUSPEND:PM_RESUME);
|
||||
pm_send(dev, undo, (void*) dev->prev_state);
|
||||
}
|
||||
entry = entry->prev;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* pm_send_all - send request to all managed devices
|
||||
* @rqst: power management request
|
||||
* @data: data for the callback
|
||||
*
|
||||
* Issue a power management request to a all devices. The
|
||||
* %PM_SUSPEND events are handled specially. Any device is
|
||||
* permitted to fail a suspend by returning a non zero (error)
|
||||
* value from its callback function. If any device vetoes a
|
||||
* suspend request then all other devices that have suspended
|
||||
* during the processing of this request are restored to their
|
||||
* previous state.
|
||||
*
|
||||
* WARNING: This function takes the pm_devs_lock. The lock is not dropped until
|
||||
* the callbacks have completed. This prevents races against pm locking
|
||||
* functions, races against module unload pm_unregister code. It does
|
||||
* mean however that you must not issue pm_ functions within the callback
|
||||
* or you will deadlock and users will hate you.
|
||||
*
|
||||
* Zero is returned on success. If a suspend fails then the status
|
||||
* from the device that vetoes the suspend is returned.
|
||||
*
|
||||
* BUGS: what stops two power management requests occurring in parallel
|
||||
* and conflicting.
|
||||
*/
|
||||
|
||||
int pm_send_all(pm_request_t rqst, void *data)
|
||||
{
|
||||
struct list_head *entry;
|
||||
|
||||
mutex_lock(&pm_devs_lock);
|
||||
entry = pm_devs.next;
|
||||
while (entry != &pm_devs) {
|
||||
struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
|
||||
if (dev->callback) {
|
||||
int status = pm_send(dev, rqst, data);
|
||||
if (status) {
|
||||
/* return devices to previous state on
|
||||
* failed suspend request
|
||||
*/
|
||||
if (rqst == PM_SUSPEND)
|
||||
pm_undo_all(dev);
|
||||
mutex_unlock(&pm_devs_lock);
|
||||
return status;
|
||||
}
|
||||
}
|
||||
entry = entry->next;
|
||||
}
|
||||
mutex_unlock(&pm_devs_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(pm_register);
|
||||
EXPORT_SYMBOL(pm_send_all);
|
||||
|
@@ -23,7 +23,6 @@
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/semaphore.h>
|
||||
#include <asm/irq_regs.h>
|
||||
#include <asm/ptrace.h>
|
||||
|
||||
|
@@ -323,9 +323,8 @@ static int ptrace_setoptions(struct task_struct *child, long data)
|
||||
return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
|
||||
}
|
||||
|
||||
static int ptrace_getsiginfo(struct task_struct *child, siginfo_t __user * data)
|
||||
static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
|
||||
{
|
||||
siginfo_t lastinfo;
|
||||
int error = -ESRCH;
|
||||
|
||||
read_lock(&tasklist_lock);
|
||||
@@ -333,31 +332,25 @@ static int ptrace_getsiginfo(struct task_struct *child, siginfo_t __user * data)
|
||||
error = -EINVAL;
|
||||
spin_lock_irq(&child->sighand->siglock);
|
||||
if (likely(child->last_siginfo != NULL)) {
|
||||
lastinfo = *child->last_siginfo;
|
||||
*info = *child->last_siginfo;
|
||||
error = 0;
|
||||
}
|
||||
spin_unlock_irq(&child->sighand->siglock);
|
||||
}
|
||||
read_unlock(&tasklist_lock);
|
||||
if (!error)
|
||||
return copy_siginfo_to_user(data, &lastinfo);
|
||||
return error;
|
||||
}
|
||||
|
||||
static int ptrace_setsiginfo(struct task_struct *child, siginfo_t __user * data)
|
||||
static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
|
||||
{
|
||||
siginfo_t newinfo;
|
||||
int error = -ESRCH;
|
||||
|
||||
if (copy_from_user(&newinfo, data, sizeof (siginfo_t)))
|
||||
return -EFAULT;
|
||||
|
||||
read_lock(&tasklist_lock);
|
||||
if (likely(child->sighand != NULL)) {
|
||||
error = -EINVAL;
|
||||
spin_lock_irq(&child->sighand->siglock);
|
||||
if (likely(child->last_siginfo != NULL)) {
|
||||
*child->last_siginfo = newinfo;
|
||||
*child->last_siginfo = *info;
|
||||
error = 0;
|
||||
}
|
||||
spin_unlock_irq(&child->sighand->siglock);
|
||||
@@ -424,6 +417,7 @@ int ptrace_request(struct task_struct *child, long request,
|
||||
long addr, long data)
|
||||
{
|
||||
int ret = -EIO;
|
||||
siginfo_t siginfo;
|
||||
|
||||
switch (request) {
|
||||
case PTRACE_PEEKTEXT:
|
||||
@@ -442,12 +436,22 @@ int ptrace_request(struct task_struct *child, long request,
|
||||
case PTRACE_GETEVENTMSG:
|
||||
ret = put_user(child->ptrace_message, (unsigned long __user *) data);
|
||||
break;
|
||||
|
||||
case PTRACE_GETSIGINFO:
|
||||
ret = ptrace_getsiginfo(child, (siginfo_t __user *) data);
|
||||
ret = ptrace_getsiginfo(child, &siginfo);
|
||||
if (!ret)
|
||||
ret = copy_siginfo_to_user((siginfo_t __user *) data,
|
||||
&siginfo);
|
||||
break;
|
||||
|
||||
case PTRACE_SETSIGINFO:
|
||||
ret = ptrace_setsiginfo(child, (siginfo_t __user *) data);
|
||||
if (copy_from_user(&siginfo, (siginfo_t __user *) data,
|
||||
sizeof siginfo))
|
||||
ret = -EFAULT;
|
||||
else
|
||||
ret = ptrace_setsiginfo(child, &siginfo);
|
||||
break;
|
||||
|
||||
case PTRACE_DETACH: /* detach a process that was attached. */
|
||||
ret = ptrace_detach(child, data);
|
||||
break;
|
||||
@@ -608,7 +612,7 @@ int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
|
||||
return (copied == sizeof(data)) ? 0 : -EIO;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
#if defined CONFIG_COMPAT && defined __ARCH_WANT_COMPAT_SYS_PTRACE
|
||||
#include <linux/compat.h>
|
||||
|
||||
int compat_ptrace_request(struct task_struct *child, compat_long_t request,
|
||||
@@ -616,6 +620,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
|
||||
{
|
||||
compat_ulong_t __user *datap = compat_ptr(data);
|
||||
compat_ulong_t word;
|
||||
siginfo_t siginfo;
|
||||
int ret;
|
||||
|
||||
switch (request) {
|
||||
@@ -638,6 +643,23 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
|
||||
ret = put_user((compat_ulong_t) child->ptrace_message, datap);
|
||||
break;
|
||||
|
||||
case PTRACE_GETSIGINFO:
|
||||
ret = ptrace_getsiginfo(child, &siginfo);
|
||||
if (!ret)
|
||||
ret = copy_siginfo_to_user32(
|
||||
(struct compat_siginfo __user *) datap,
|
||||
&siginfo);
|
||||
break;
|
||||
|
||||
case PTRACE_SETSIGINFO:
|
||||
memset(&siginfo, 0, sizeof siginfo);
|
||||
if (copy_siginfo_from_user32(
|
||||
&siginfo, (struct compat_siginfo __user *) datap))
|
||||
ret = -EFAULT;
|
||||
else
|
||||
ret = ptrace_setsiginfo(child, &siginfo);
|
||||
break;
|
||||
|
||||
default:
|
||||
ret = ptrace_request(child, request, addr, data);
|
||||
}
|
||||
@@ -645,7 +667,6 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
|
||||
return ret;
|
||||
}
|
||||
|
||||
#ifdef __ARCH_WANT_COMPAT_SYS_PTRACE
|
||||
asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
|
||||
compat_long_t addr, compat_long_t data)
|
||||
{
|
||||
@@ -688,6 +709,4 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
|
||||
unlock_kernel();
|
||||
return ret;
|
||||
}
|
||||
#endif /* __ARCH_WANT_COMPAT_SYS_PTRACE */
|
||||
|
||||
#endif /* CONFIG_COMPAT */
|
||||
#endif /* CONFIG_COMPAT && __ARCH_WANT_COMPAT_SYS_PTRACE */
|
||||
|
@@ -1007,10 +1007,10 @@ void __synchronize_sched(void)
|
||||
if (sched_getaffinity(0, &oldmask) < 0)
|
||||
oldmask = cpu_possible_map;
|
||||
for_each_online_cpu(cpu) {
|
||||
sched_setaffinity(0, cpumask_of_cpu(cpu));
|
||||
sched_setaffinity(0, &cpumask_of_cpu(cpu));
|
||||
schedule();
|
||||
}
|
||||
sched_setaffinity(0, oldmask);
|
||||
sched_setaffinity(0, &oldmask);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__synchronize_sched);
|
||||
|
||||
|
@@ -723,9 +723,10 @@ static int rcu_idle_cpu; /* Force all torture tasks off this CPU */
|
||||
*/
|
||||
static void rcu_torture_shuffle_tasks(void)
|
||||
{
|
||||
cpumask_t tmp_mask = CPU_MASK_ALL;
|
||||
cpumask_t tmp_mask;
|
||||
int i;
|
||||
|
||||
cpus_setall(tmp_mask);
|
||||
get_online_cpus();
|
||||
|
||||
/* No point in shuffling if there is only one online CPU (ex: UP) */
|
||||
@@ -737,25 +738,27 @@ static void rcu_torture_shuffle_tasks(void)
|
||||
if (rcu_idle_cpu != -1)
|
||||
cpu_clear(rcu_idle_cpu, tmp_mask);
|
||||
|
||||
set_cpus_allowed(current, tmp_mask);
|
||||
set_cpus_allowed_ptr(current, &tmp_mask);
|
||||
|
||||
if (reader_tasks) {
|
||||
for (i = 0; i < nrealreaders; i++)
|
||||
if (reader_tasks[i])
|
||||
set_cpus_allowed(reader_tasks[i], tmp_mask);
|
||||
set_cpus_allowed_ptr(reader_tasks[i],
|
||||
&tmp_mask);
|
||||
}
|
||||
|
||||
if (fakewriter_tasks) {
|
||||
for (i = 0; i < nfakewriters; i++)
|
||||
if (fakewriter_tasks[i])
|
||||
set_cpus_allowed(fakewriter_tasks[i], tmp_mask);
|
||||
set_cpus_allowed_ptr(fakewriter_tasks[i],
|
||||
&tmp_mask);
|
||||
}
|
||||
|
||||
if (writer_task)
|
||||
set_cpus_allowed(writer_task, tmp_mask);
|
||||
set_cpus_allowed_ptr(writer_task, &tmp_mask);
|
||||
|
||||
if (stats_task)
|
||||
set_cpus_allowed(stats_task, tmp_mask);
|
||||
set_cpus_allowed_ptr(stats_task, &tmp_mask);
|
||||
|
||||
if (rcu_idle_cpu == -1)
|
||||
rcu_idle_cpu = num_online_cpus() - 1;
|
||||
|
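Several hunks in this merge (kmod.c, rcupdate.c, and the rcutorture.c section above) move callers from passing a cpumask_t by value to passing it by pointer (set_cpus_allowed_ptr(), sched_setaffinity() with &mask). A hedged before/after sketch of the pattern; the helper and the choice of CPU 0 are illustrative only:

#include <linux/cpumask.h>
#include <linux/sched.h>

static void pin_task_example(struct task_struct *tsk)	/* illustrative helper */
{
	cpumask_t mask;

	cpus_setall(mask);
	cpu_clear(0, mask);		/* e.g. keep the task off CPU 0 */

	/* Old style: the whole mask is copied into the callee by value. */
	/* set_cpus_allowed(tsk, mask); */

	/* New style: only a pointer crosses the call, as in the hunks above. */
	set_cpus_allowed_ptr(tsk, &mask);
}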
@@ -486,6 +486,24 @@ int adjust_resource(struct resource *res, resource_size_t start, resource_size_t
|
||||
|
||||
EXPORT_SYMBOL(adjust_resource);
|
||||
|
||||
/**
|
||||
* resource_alignment - calculate resource's alignment
|
||||
* @res: resource pointer
|
||||
*
|
||||
* Returns alignment on success, 0 (invalid alignment) on failure.
|
||||
*/
|
||||
resource_size_t resource_alignment(struct resource *res)
|
||||
{
|
||||
switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
|
||||
case IORESOURCE_SIZEALIGN:
|
||||
return res->end - res->start + 1;
|
||||
case IORESOURCE_STARTALIGN:
|
||||
return res->start;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* This is compatibility stuff for IO resources.
|
||||
*
|
||||
|
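For the new resource_alignment() helper above, a short sketch of what the two flag cases return; the resource values are made up for illustration and the declaration site is assumed to be linux/ioport.h:

#include <linux/ioport.h>	/* assumption: resource_alignment() declared here */

static void alignment_example(void)	/* illustrative only */
{
	struct resource r1 = {
		.start = 0xd0000000,
		.end   = 0xd0000fff,	/* a 4 KiB window */
		.flags = IORESOURCE_MEM | IORESOURCE_SIZEALIGN,
	};
	struct resource r2 = {
		.start = 0xd0100000,
		.flags = IORESOURCE_MEM | IORESOURCE_STARTALIGN,
	};

	/* SIZEALIGN: alignment is the size, end - start + 1 = 0x1000 */
	resource_size_t a1 = resource_alignment(&r1);

	/* STARTALIGN: alignment is the start address itself, 0xd0100000 */
	resource_size_t a2 = resource_alignment(&r2);

	(void)a1;
	(void)a2;
}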
1867  kernel/sched.c  (diff too large to display)
@@ -67,14 +67,24 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
|
||||
(long long)(p->nvcsw + p->nivcsw),
|
||||
p->prio);
|
||||
#ifdef CONFIG_SCHEDSTATS
|
||||
SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld\n",
|
||||
SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
|
||||
SPLIT_NS(p->se.vruntime),
|
||||
SPLIT_NS(p->se.sum_exec_runtime),
|
||||
SPLIT_NS(p->se.sum_sleep_runtime));
|
||||
#else
|
||||
SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld\n",
|
||||
SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
|
||||
0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_CGROUP_SCHED
|
||||
{
|
||||
char path[64];
|
||||
|
||||
cgroup_path(task_group(p)->css.cgroup, path, sizeof(path));
|
||||
SEQ_printf(m, " %s", path);
|
||||
}
|
||||
#endif
|
||||
SEQ_printf(m, "\n");
|
||||
}
|
||||
|
||||
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
|
||||
@@ -109,7 +119,21 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
|
||||
struct sched_entity *last;
|
||||
unsigned long flags;
|
||||
|
||||
SEQ_printf(m, "\ncfs_rq\n");
|
||||
#if !defined(CONFIG_CGROUP_SCHED) || !defined(CONFIG_USER_SCHED)
|
||||
SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
|
||||
#else
|
||||
char path[128] = "";
|
||||
struct cgroup *cgroup = NULL;
|
||||
struct task_group *tg = cfs_rq->tg;
|
||||
|
||||
if (tg)
|
||||
cgroup = tg->css.cgroup;
|
||||
|
||||
if (cgroup)
|
||||
cgroup_path(cgroup, path, sizeof(path));
|
||||
|
||||
SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path);
|
||||
#endif
|
||||
|
||||
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
|
||||
SPLIT_NS(cfs_rq->exec_clock));
|
||||
@@ -143,6 +167,11 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
|
||||
#endif
|
||||
SEQ_printf(m, " .%-30s: %ld\n", "nr_spread_over",
|
||||
cfs_rq->nr_spread_over);
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
#ifdef CONFIG_SMP
|
||||
SEQ_printf(m, " .%-30s: %lu\n", "shares", cfs_rq->shares);
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
|
||||
static void print_cpu(struct seq_file *m, int cpu)
|
||||
@@ -214,7 +243,6 @@ static int sched_debug_show(struct seq_file *m, void *v)
|
||||
PN(sysctl_sched_latency);
|
||||
PN(sysctl_sched_min_granularity);
|
||||
PN(sysctl_sched_wakeup_granularity);
|
||||
PN(sysctl_sched_batch_wakeup_granularity);
|
||||
PN(sysctl_sched_child_runs_first);
|
||||
P(sysctl_sched_features);
|
||||
#undef PN
|
||||
|
@@ -62,24 +62,14 @@ const_debug unsigned int sysctl_sched_child_runs_first = 1;
|
||||
unsigned int __read_mostly sysctl_sched_compat_yield;
|
||||
|
||||
/*
|
||||
* SCHED_BATCH wake-up granularity.
|
||||
* SCHED_OTHER wake-up granularity.
|
||||
* (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
|
||||
*
|
||||
* This option delays the preemption effects of decoupled workloads
|
||||
* and reduces their over-scheduling. Synchronous workloads will still
|
||||
* have immediate wakeup/sleep latencies.
|
||||
*/
|
||||
unsigned int sysctl_sched_batch_wakeup_granularity = 10000000UL;
|
||||
|
||||
/*
|
||||
* SCHED_OTHER wake-up granularity.
|
||||
* (default: 5 msec * (1 + ilog(ncpus)), units: nanoseconds)
|
||||
*
|
||||
* This option delays the preemption effects of decoupled workloads
|
||||
* and reduces their over-scheduling. Synchronous workloads will still
|
||||
* have immediate wakeup/sleep latencies.
|
||||
*/
|
||||
unsigned int sysctl_sched_wakeup_granularity = 5000000UL;
|
||||
unsigned int sysctl_sched_wakeup_granularity = 10000000UL;
|
||||
|
||||
const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
|
||||
|
||||
@@ -87,6 +77,11 @@ const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
|
||||
* CFS operations on generic schedulable entities:
|
||||
*/
|
||||
|
||||
static inline struct task_struct *task_of(struct sched_entity *se)
|
||||
{
|
||||
return container_of(se, struct task_struct, se);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
|
||||
/* cpu runqueue to which this cfs_rq is attached */
|
||||
@@ -98,6 +93,54 @@ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
|
||||
/* An entity is a task if it doesn't "own" a runqueue */
|
||||
#define entity_is_task(se) (!se->my_q)
|
||||
|
||||
/* Walk up scheduling entities hierarchy */
|
||||
#define for_each_sched_entity(se) \
|
||||
for (; se; se = se->parent)
|
||||
|
||||
static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
|
||||
{
|
||||
return p->se.cfs_rq;
|
||||
}
|
||||
|
||||
/* runqueue on which this entity is (to be) queued */
|
||||
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
|
||||
{
|
||||
return se->cfs_rq;
|
||||
}
|
||||
|
||||
/* runqueue "owned" by this group */
|
||||
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
|
||||
{
|
||||
return grp->my_q;
|
||||
}
|
||||
|
||||
/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
|
||||
* another cpu ('this_cpu')
|
||||
*/
|
||||
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
|
||||
{
|
||||
return cfs_rq->tg->cfs_rq[this_cpu];
|
||||
}
|
||||
|
||||
/* Iterate thr' all leaf cfs_rq's on a runqueue */
|
||||
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
|
||||
list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
|
||||
|
||||
/* Do the two (enqueued) entities belong to the same group ? */
|
||||
static inline int
|
||||
is_same_group(struct sched_entity *se, struct sched_entity *pse)
|
||||
{
|
||||
if (se->cfs_rq == pse->cfs_rq)
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline struct sched_entity *parent_entity(struct sched_entity *se)
|
||||
{
|
||||
return se->parent;
|
||||
}
|
||||
|
||||
#else /* CONFIG_FAIR_GROUP_SCHED */
|
||||
|
||||
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
|
||||
@@ -107,13 +150,49 @@ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
|
||||
|
||||
#define entity_is_task(se) 1
|
||||
|
||||
#endif /* CONFIG_FAIR_GROUP_SCHED */
|
||||
#define for_each_sched_entity(se) \
|
||||
for (; se; se = NULL)
|
||||
|
||||
static inline struct task_struct *task_of(struct sched_entity *se)
|
||||
static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
|
||||
{
|
||||
return container_of(se, struct task_struct, se);
|
||||
return &task_rq(p)->cfs;
|
||||
}
|
||||
|
||||
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
|
||||
{
|
||||
struct task_struct *p = task_of(se);
|
||||
struct rq *rq = task_rq(p);
|
||||
|
||||
return &rq->cfs;
|
||||
}
|
||||
|
||||
/* runqueue "owned" by this group */
|
||||
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
|
||||
{
|
||||
return &cpu_rq(this_cpu)->cfs;
|
||||
}
|
||||
|
||||
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
|
||||
for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
|
||||
|
||||
static inline int
|
||||
is_same_group(struct sched_entity *se, struct sched_entity *pse)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
static inline struct sched_entity *parent_entity(struct sched_entity *se)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_FAIR_GROUP_SCHED */
|
||||
|
||||
|
||||
/**************************************************************
|
||||
* Scheduling class tree data structure manipulation methods:
|
||||
@@ -254,6 +333,34 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* delta *= w / rw
|
||||
*/
|
||||
static inline unsigned long
|
||||
calc_delta_weight(unsigned long delta, struct sched_entity *se)
|
||||
{
|
||||
for_each_sched_entity(se) {
|
||||
delta = calc_delta_mine(delta,
|
||||
se->load.weight, &cfs_rq_of(se)->load);
|
||||
}
|
||||
|
||||
return delta;
|
||||
}
|
||||
|
||||
/*
|
||||
* delta *= rw / w
|
||||
*/
|
||||
static inline unsigned long
|
||||
calc_delta_fair(unsigned long delta, struct sched_entity *se)
|
||||
{
|
||||
for_each_sched_entity(se) {
|
||||
delta = calc_delta_mine(delta,
|
||||
cfs_rq_of(se)->load.weight, &se->load);
|
||||
}
|
||||
|
||||
return delta;
|
||||
}
|
||||
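Restating the two helpers above in one place: with w the entity's weight and rw the weight of its cfs_rq, applied at every level of the group hierarchy, the loops compute (a sketch of the math only, matching the "delta *= w / rw" and "delta *= rw / w" comments):

\[
\Delta_{\text{weight}} \;=\; \Delta \prod_{se} \frac{w_{se}}{rw_{\text{cfs\_rq}(se)}},
\qquad
\Delta_{\text{fair}} \;=\; \Delta \prod_{se} \frac{rw_{\text{cfs\_rq}(se)}}{w_{se}}
\]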
|
||||
/*
|
||||
* The idea is to set a period in which each task runs once.
|
||||
*
|
||||
@@ -283,29 +390,54 @@ static u64 __sched_period(unsigned long nr_running)
|
||||
*/
|
||||
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
|
||||
{
|
||||
return calc_delta_mine(__sched_period(cfs_rq->nr_running),
|
||||
se->load.weight, &cfs_rq->load);
|
||||
return calc_delta_weight(__sched_period(cfs_rq->nr_running), se);
|
||||
}
|
||||
|
||||
/*
|
||||
* We calculate the vruntime slice.
|
||||
* We calculate the vruntime slice of a to be inserted task
|
||||
*
|
||||
* vs = s/w = p/rw
|
||||
* vs = s*rw/w = p
|
||||
*/
|
||||
static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
|
||||
{
|
||||
u64 vslice = __sched_period(nr_running);
|
||||
|
||||
vslice *= NICE_0_LOAD;
|
||||
do_div(vslice, rq_weight);
|
||||
|
||||
return vslice;
|
||||
}
|
||||
|
||||
static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
|
||||
{
|
||||
return __sched_vslice(cfs_rq->load.weight + se->load.weight,
|
||||
cfs_rq->nr_running + 1);
|
||||
unsigned long nr_running = cfs_rq->nr_running;
|
||||
|
||||
if (!se->on_rq)
|
||||
nr_running++;
|
||||
|
||||
return __sched_period(nr_running);
|
||||
}
|
||||
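A one-line check of the "vs = s*rw/w = p" comment above: sched_slice() scales the period p down by w/rw (via calc_delta_weight()), so scaling that slice back up by rw/w simply recovers the period, which is why the new sched_vslice_add() can return __sched_period() for the would-be nr_running:

\[
vs \;=\; s \cdot \frac{rw}{w} \;=\; \Bigl(p \cdot \frac{w}{rw}\Bigr)\frac{rw}{w} \;=\; p
\]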
|
||||
/*
|
||||
* The goal of calc_delta_asym() is to be asymmetrically around NICE_0_LOAD, in
|
||||
* that it favours >=0 over <0.
|
||||
*
|
||||
* -20 |
|
||||
* |
|
||||
* 0 --------+-------
|
||||
* .'
|
||||
* 19 .'
|
||||
*
|
||||
*/
|
||||
static unsigned long
|
||||
calc_delta_asym(unsigned long delta, struct sched_entity *se)
|
||||
{
|
||||
struct load_weight lw = {
|
||||
.weight = NICE_0_LOAD,
|
||||
.inv_weight = 1UL << (WMULT_SHIFT-NICE_0_SHIFT)
|
||||
};
|
||||
|
||||
for_each_sched_entity(se) {
|
||||
struct load_weight *se_lw = &se->load;
|
||||
|
||||
if (se->load.weight < NICE_0_LOAD)
|
||||
se_lw = &lw;
|
||||
|
||||
delta = calc_delta_mine(delta,
|
||||
cfs_rq_of(se)->load.weight, se_lw);
|
||||
}
|
||||
|
||||
return delta;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -322,11 +454,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
|
||||
|
||||
curr->sum_exec_runtime += delta_exec;
|
||||
schedstat_add(cfs_rq, exec_clock, delta_exec);
|
||||
delta_exec_weighted = delta_exec;
|
||||
if (unlikely(curr->load.weight != NICE_0_LOAD)) {
|
||||
delta_exec_weighted = calc_delta_fair(delta_exec_weighted,
|
||||
&curr->load);
|
||||
}
|
||||
delta_exec_weighted = calc_delta_fair(delta_exec, curr);
|
||||
curr->vruntime += delta_exec_weighted;
|
||||
}
|
||||
|
||||
@@ -413,20 +541,43 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
|
||||
* Scheduling class queueing methods:
|
||||
*/
|
||||
|
||||
#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
|
||||
static void
|
||||
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
|
||||
{
|
||||
cfs_rq->task_weight += weight;
|
||||
}
|
||||
#else
|
||||
static inline void
|
||||
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
static void
|
||||
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
|
||||
{
|
||||
update_load_add(&cfs_rq->load, se->load.weight);
|
||||
if (!parent_entity(se))
|
||||
inc_cpu_load(rq_of(cfs_rq), se->load.weight);
|
||||
if (entity_is_task(se))
|
||||
add_cfs_task_weight(cfs_rq, se->load.weight);
|
||||
cfs_rq->nr_running++;
|
||||
se->on_rq = 1;
|
||||
list_add(&se->group_node, &cfs_rq->tasks);
|
||||
}
|
||||
|
||||
static void
|
||||
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
|
||||
{
|
||||
update_load_sub(&cfs_rq->load, se->load.weight);
|
||||
if (!parent_entity(se))
|
||||
dec_cpu_load(rq_of(cfs_rq), se->load.weight);
|
||||
if (entity_is_task(se))
|
||||
add_cfs_task_weight(cfs_rq, -se->load.weight);
|
||||
cfs_rq->nr_running--;
|
||||
se->on_rq = 0;
|
||||
list_del_init(&se->group_node);
|
||||
}
|
||||
|
||||
static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
|
||||
@@ -510,8 +661,12 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
|
||||
|
||||
if (!initial) {
|
||||
/* sleeps upto a single latency don't count. */
|
||||
if (sched_feat(NEW_FAIR_SLEEPERS))
|
||||
vruntime -= sysctl_sched_latency;
|
||||
if (sched_feat(NEW_FAIR_SLEEPERS)) {
|
||||
if (sched_feat(NORMALIZED_SLEEPER))
|
||||
vruntime -= calc_delta_weight(sysctl_sched_latency, se);
|
||||
else
|
||||
vruntime -= sysctl_sched_latency;
|
||||
}
|
||||
|
||||
/* ensure we never gain time by being placed backwards. */
|
||||
vruntime = max_vruntime(se->vruntime, vruntime);
|
||||
@@ -627,20 +782,16 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
|
||||
se->prev_sum_exec_runtime = se->sum_exec_runtime;
|
||||
}
|
||||
|
||||
static int
|
||||
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
|
||||
|
||||
static struct sched_entity *
|
||||
pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se)
|
||||
{
|
||||
s64 diff, gran;
|
||||
|
||||
if (!cfs_rq->next)
|
||||
return se;
|
||||
|
||||
diff = cfs_rq->next->vruntime - se->vruntime;
|
||||
if (diff < 0)
|
||||
return se;
|
||||
|
||||
gran = calc_delta_fair(sysctl_sched_wakeup_granularity, &cfs_rq->load);
|
||||
if (diff > gran)
|
||||
if (wakeup_preempt_entity(cfs_rq->next, se) != 0)
|
||||
return se;
|
||||
|
||||
return cfs_rq->next;
|
||||
@@ -708,101 +859,6 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
|
||||
* CFS operations on tasks:
|
||||
*/
|
||||
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
|
||||
/* Walk up scheduling entities hierarchy */
|
||||
#define for_each_sched_entity(se) \
|
||||
for (; se; se = se->parent)
|
||||
|
||||
static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
|
||||
{
|
||||
return p->se.cfs_rq;
|
||||
}
|
||||
|
||||
/* runqueue on which this entity is (to be) queued */
|
||||
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
|
||||
{
|
||||
return se->cfs_rq;
|
||||
}
|
||||
|
||||
/* runqueue "owned" by this group */
|
||||
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
|
||||
{
|
||||
return grp->my_q;
|
||||
}
|
||||
|
||||
/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
|
||||
* another cpu ('this_cpu')
|
||||
*/
|
||||
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
|
||||
{
|
||||
return cfs_rq->tg->cfs_rq[this_cpu];
|
||||
}
|
||||
|
||||
/* Iterate thr' all leaf cfs_rq's on a runqueue */
|
||||
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
|
||||
list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
|
||||
|
||||
/* Do the two (enqueued) entities belong to the same group ? */
|
||||
static inline int
|
||||
is_same_group(struct sched_entity *se, struct sched_entity *pse)
|
||||
{
|
||||
if (se->cfs_rq == pse->cfs_rq)
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline struct sched_entity *parent_entity(struct sched_entity *se)
|
||||
{
|
||||
return se->parent;
|
||||
}
|
||||
|
||||
#else /* CONFIG_FAIR_GROUP_SCHED */
|
||||
|
||||
#define for_each_sched_entity(se) \
|
||||
for (; se; se = NULL)
|
||||
|
||||
static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
|
||||
{
|
||||
return &task_rq(p)->cfs;
|
||||
}
|
||||
|
||||
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
|
||||
{
|
||||
struct task_struct *p = task_of(se);
|
||||
struct rq *rq = task_rq(p);
|
||||
|
||||
return &rq->cfs;
|
||||
}
|
||||
|
||||
/* runqueue "owned" by this group */
|
||||
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
|
||||
{
|
||||
return &cpu_rq(this_cpu)->cfs;
|
||||
}
|
||||
|
||||
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
|
||||
for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
|
||||
|
||||
static inline int
|
||||
is_same_group(struct sched_entity *se, struct sched_entity *pse)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
static inline struct sched_entity *parent_entity(struct sched_entity *se)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_FAIR_GROUP_SCHED */
|
||||
|
||||
#ifdef CONFIG_SCHED_HRTICK
|
||||
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
|
||||
{
|
||||
@@ -916,7 +972,7 @@ static void yield_task_fair(struct rq *rq)
|
||||
/*
|
||||
* Already in the rightmost position?
|
||||
*/
|
||||
if (unlikely(rightmost->vruntime < se->vruntime))
|
||||
if (unlikely(!rightmost || rightmost->vruntime < se->vruntime))
|
||||
return;
|
||||
|
||||
/*
|
||||
@@ -955,7 +1011,9 @@ static int wake_idle(int cpu, struct task_struct *p)
|
||||
return cpu;
|
||||
|
||||
for_each_domain(cpu, sd) {
|
||||
if (sd->flags & SD_WAKE_IDLE) {
|
||||
if ((sd->flags & SD_WAKE_IDLE)
|
||||
|| ((sd->flags & SD_WAKE_IDLE_FAR)
|
||||
&& !task_hot(p, task_rq(p)->clock, sd))) {
|
||||
cpus_and(tmp, sd->span, p->cpus_allowed);
|
||||
for_each_cpu_mask(i, tmp) {
|
||||
if (idle_cpu(i)) {
|
||||
@@ -1099,6 +1157,58 @@ out:
|
||||
}
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
static unsigned long wakeup_gran(struct sched_entity *se)
|
||||
{
|
||||
unsigned long gran = sysctl_sched_wakeup_granularity;
|
||||
|
||||
/*
|
||||
* More easily preempt - nice tasks, while not making it harder for
|
||||
* + nice tasks.
|
||||
*/
|
||||
gran = calc_delta_asym(sysctl_sched_wakeup_granularity, se);
|
||||
|
||||
return gran;
|
||||
}
|
||||
|
||||
/*
|
||||
* Should 'se' preempt 'curr'.
|
||||
*
|
||||
* |s1
|
||||
* |s2
|
||||
* |s3
|
||||
* g
|
||||
* |<--->|c
|
||||
*
|
||||
* w(c, s1) = -1
|
||||
* w(c, s2) = 0
|
||||
* w(c, s3) = 1
|
||||
*
|
||||
*/
|
||||
static int
|
||||
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
|
||||
{
|
||||
s64 gran, vdiff = curr->vruntime - se->vruntime;
|
||||
|
||||
if (vdiff < 0)
|
||||
return -1;
|
||||
|
||||
gran = wakeup_gran(curr);
|
||||
if (vdiff > gran)
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
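A brief restatement of the return convention of wakeup_preempt_entity(curr, se) above, derived from the diagram and the code (the per-case wording is editorial, not from the patch):

/*
 *   -1  se's vruntime is already larger than curr's (vdiff < 0)
 *    0  se's vruntime is smaller, but by no more than wakeup_gran(curr)
 *    1  se's vruntime is smaller by more than wakeup_gran(curr)
 *
 * check_preempt_wakeup() reschedules the current task only on a return of 1;
 * pick_next() earlier in this file keeps the entity it was passed on any
 * non-zero return.
 */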
|
||||
/* return depth at which a sched entity is present in the hierarchy */
|
||||
static inline int depth_se(struct sched_entity *se)
|
||||
{
|
||||
int depth = 0;
|
||||
|
||||
for_each_sched_entity(se)
|
||||
depth++;
|
||||
|
||||
return depth;
|
||||
}
|
||||
|
||||
/*
|
||||
* Preempt the current task with a newly woken task if needed:
|
||||
@@ -1108,7 +1218,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
|
||||
struct task_struct *curr = rq->curr;
|
||||
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
|
||||
struct sched_entity *se = &curr->se, *pse = &p->se;
|
||||
unsigned long gran;
|
||||
int se_depth, pse_depth;
|
||||
|
||||
if (unlikely(rt_prio(p->prio))) {
|
||||
update_rq_clock(rq);
|
||||
@@ -1133,20 +1243,33 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
|
||||
if (!sched_feat(WAKEUP_PREEMPT))
|
||||
return;
|
||||
|
||||
/*
|
||||
* preemption test can be made between sibling entities who are in the
|
||||
* same cfs_rq i.e who have a common parent. Walk up the hierarchy of
|
||||
* both tasks until we find their ancestors who are siblings of common
|
||||
* parent.
|
||||
*/
|
||||
|
||||
/* First walk up until both entities are at same depth */
|
||||
se_depth = depth_se(se);
|
||||
pse_depth = depth_se(pse);
|
||||
|
||||
while (se_depth > pse_depth) {
|
||||
se_depth--;
|
||||
se = parent_entity(se);
|
||||
}
|
||||
|
||||
while (pse_depth > se_depth) {
|
||||
pse_depth--;
|
||||
pse = parent_entity(pse);
|
||||
}
|
||||
|
||||
while (!is_same_group(se, pse)) {
|
||||
se = parent_entity(se);
|
||||
pse = parent_entity(pse);
|
||||
}
|
||||
|
||||
gran = sysctl_sched_wakeup_granularity;
|
||||
/*
|
||||
* More easily preempt - nice tasks, while not making
|
||||
* it harder for + nice tasks.
|
||||
*/
|
||||
if (unlikely(se->load.weight > NICE_0_LOAD))
|
||||
gran = calc_delta_fair(gran, &se->load);
|
||||
|
||||
if (pse->vruntime + gran < se->vruntime)
|
||||
if (wakeup_preempt_entity(se, pse) == 1)
|
||||
resched_task(curr);
|
||||
}
|
||||
|
||||
@@ -1197,15 +1320,27 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
|
||||
* the current task:
|
||||
*/
|
||||
static struct task_struct *
|
||||
__load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
|
||||
__load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
|
||||
{
|
||||
struct task_struct *p;
|
||||
struct task_struct *p = NULL;
|
||||
struct sched_entity *se;
|
||||
|
||||
if (!curr)
|
||||
if (next == &cfs_rq->tasks)
|
||||
return NULL;
|
||||
|
||||
p = rb_entry(curr, struct task_struct, se.run_node);
|
||||
cfs_rq->rb_load_balance_curr = rb_next(curr);
|
||||
/* Skip over entities that are not tasks */
|
||||
do {
|
||||
se = list_entry(next, struct sched_entity, group_node);
|
||||
next = next->next;
|
||||
} while (next != &cfs_rq->tasks && !entity_is_task(se));
|
||||
|
||||
if (next == &cfs_rq->tasks)
|
||||
return NULL;
|
||||
|
||||
cfs_rq->balance_iterator = next;
|
||||
|
||||
if (entity_is_task(se))
|
||||
p = task_of(se);
|
||||
|
||||
return p;
|
||||
}
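The rewritten iterator above walks the cfs_rq->tasks list instead of the rbtree, skipping group entities and remembering where to resume for the next call. A small userspace sketch of that skip-and-remember pattern over a plain array, not part of the commit; the entity layout is invented for illustration:

#include <stdio.h>
#include <stddef.h>

struct entity {
        int is_task;
        int id;
};

/* return the next task entity at or after *cursor, advancing the cursor */
static struct entity *next_task(struct entity *v, int len, int *cursor)
{
        while (*cursor < len) {
                struct entity *e = &v[(*cursor)++];
                if (e->is_task)
                        return e;       /* found a task; cursor already points past it */
        }
        return NULL;                    /* ran off the end: no more tasks */
}

int main(void)
{
        struct entity v[] = {
                { 1, 10 }, { 0, 11 }, { 0, 12 }, { 1, 13 }, { 1, 14 },
        };
        int cursor = 0;
        struct entity *e;

        while ((e = next_task(v, 5, &cursor)) != NULL)
                printf("task %d\n", e->id);     /* prints 10, 13, 14 */
        return 0;
}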
|
||||
@@ -1214,85 +1349,100 @@ static struct task_struct *load_balance_start_fair(void *arg)
|
||||
{
|
||||
struct cfs_rq *cfs_rq = arg;
|
||||
|
||||
return __load_balance_iterator(cfs_rq, first_fair(cfs_rq));
|
||||
return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next);
|
||||
}
|
||||
|
||||
static struct task_struct *load_balance_next_fair(void *arg)
|
||||
{
|
||||
struct cfs_rq *cfs_rq = arg;
|
||||
|
||||
return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr);
|
||||
return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
|
||||
}
|
||||
|
||||
static unsigned long
|
||||
__load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
|
||||
unsigned long max_load_move, struct sched_domain *sd,
|
||||
enum cpu_idle_type idle, int *all_pinned, int *this_best_prio,
|
||||
struct cfs_rq *cfs_rq)
|
||||
{
|
||||
struct rq_iterator cfs_rq_iterator;
|
||||
|
||||
cfs_rq_iterator.start = load_balance_start_fair;
|
||||
cfs_rq_iterator.next = load_balance_next_fair;
|
||||
cfs_rq_iterator.arg = cfs_rq;
|
||||
|
||||
return balance_tasks(this_rq, this_cpu, busiest,
|
||||
max_load_move, sd, idle, all_pinned,
|
||||
this_best_prio, &cfs_rq_iterator);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
|
||||
{
|
||||
struct sched_entity *curr;
|
||||
struct task_struct *p;
|
||||
|
||||
if (!cfs_rq->nr_running || !first_fair(cfs_rq))
|
||||
return MAX_PRIO;
|
||||
|
||||
curr = cfs_rq->curr;
|
||||
if (!curr)
|
||||
curr = __pick_next_entity(cfs_rq);
|
||||
|
||||
p = task_of(curr);
|
||||
|
||||
return p->prio;
|
||||
}
|
||||
#endif
|
||||
|
||||
static unsigned long
|
||||
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
|
||||
unsigned long max_load_move,
|
||||
struct sched_domain *sd, enum cpu_idle_type idle,
|
||||
int *all_pinned, int *this_best_prio)
|
||||
{
|
||||
struct cfs_rq *busy_cfs_rq;
|
||||
long rem_load_move = max_load_move;
|
||||
struct rq_iterator cfs_rq_iterator;
|
||||
int busiest_cpu = cpu_of(busiest);
|
||||
struct task_group *tg;
|
||||
|
||||
cfs_rq_iterator.start = load_balance_start_fair;
|
||||
cfs_rq_iterator.next = load_balance_next_fair;
|
||||
|
||||
for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
struct cfs_rq *this_cfs_rq;
|
||||
rcu_read_lock();
|
||||
list_for_each_entry(tg, &task_groups, list) {
|
||||
long imbalance;
|
||||
unsigned long maxload;
|
||||
unsigned long this_weight, busiest_weight;
|
||||
long rem_load, max_load, moved_load;
|
||||
|
||||
this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);
|
||||
|
||||
imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
|
||||
/* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
|
||||
if (imbalance <= 0)
|
||||
/*
|
||||
* empty group
|
||||
*/
|
||||
if (!aggregate(tg, sd)->task_weight)
|
||||
continue;
|
||||
|
||||
/* Don't pull more than imbalance/2 */
|
||||
imbalance /= 2;
|
||||
maxload = min(rem_load_move, imbalance);
|
||||
rem_load = rem_load_move * aggregate(tg, sd)->rq_weight;
|
||||
rem_load /= aggregate(tg, sd)->load + 1;
|
||||
|
||||
*this_best_prio = cfs_rq_best_prio(this_cfs_rq);
|
||||
#else
|
||||
# define maxload rem_load_move
|
||||
#endif
|
||||
/*
|
||||
* pass busy_cfs_rq argument into
|
||||
* load_balance_[start|next]_fair iterators
|
||||
*/
|
||||
cfs_rq_iterator.arg = busy_cfs_rq;
|
||||
rem_load_move -= balance_tasks(this_rq, this_cpu, busiest,
|
||||
maxload, sd, idle, all_pinned,
|
||||
this_best_prio,
|
||||
&cfs_rq_iterator);
|
||||
this_weight = tg->cfs_rq[this_cpu]->task_weight;
|
||||
busiest_weight = tg->cfs_rq[busiest_cpu]->task_weight;
|
||||
|
||||
if (rem_load_move <= 0)
|
||||
imbalance = (busiest_weight - this_weight) / 2;
|
||||
|
||||
if (imbalance < 0)
|
||||
imbalance = busiest_weight;
|
||||
|
||||
max_load = max(rem_load, imbalance);
|
||||
moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
|
||||
max_load, sd, idle, all_pinned, this_best_prio,
|
||||
tg->cfs_rq[busiest_cpu]);
|
||||
|
||||
if (!moved_load)
|
||||
continue;
|
||||
|
||||
move_group_shares(tg, sd, busiest_cpu, this_cpu);
|
||||
|
||||
moved_load *= aggregate(tg, sd)->load;
|
||||
moved_load /= aggregate(tg, sd)->rq_weight + 1;
|
||||
|
||||
rem_load_move -= moved_load;
|
||||
if (rem_load_move < 0)
|
||||
break;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return max_load_move - rem_load_move;
|
||||
}
|
||||
#else
|
||||
static unsigned long
|
||||
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
|
||||
unsigned long max_load_move,
|
||||
struct sched_domain *sd, enum cpu_idle_type idle,
|
||||
int *all_pinned, int *this_best_prio)
|
||||
{
|
||||
return __load_balance_fair(this_rq, this_cpu, busiest,
|
||||
max_load_move, sd, idle, all_pinned,
|
||||
this_best_prio, &busiest->cfs);
|
||||
}
|
||||
#endif
|
||||
|
||||
static int
|
||||
move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
|
||||
@@ -1461,16 +1611,40 @@ static const struct sched_class fair_sched_class = {
|
||||
};
|
||||
|
||||
#ifdef CONFIG_SCHED_DEBUG
|
||||
static void
|
||||
print_cfs_rq_tasks(struct seq_file *m, struct cfs_rq *cfs_rq, int depth)
|
||||
{
|
||||
struct sched_entity *se;
|
||||
|
||||
if (!cfs_rq)
|
||||
return;
|
||||
|
||||
list_for_each_entry_rcu(se, &cfs_rq->tasks, group_node) {
|
||||
int i;
|
||||
|
||||
for (i = depth; i; i--)
|
||||
seq_puts(m, " ");
|
||||
|
||||
seq_printf(m, "%lu %s %lu\n",
|
||||
se->load.weight,
|
||||
entity_is_task(se) ? "T" : "G",
|
||||
calc_delta_weight(SCHED_LOAD_SCALE, se)
|
||||
);
|
||||
if (!entity_is_task(se))
|
||||
print_cfs_rq_tasks(m, group_cfs_rq(se), depth + 1);
|
||||
}
|
||||
}
|
||||
|
||||
static void print_cfs_stats(struct seq_file *m, int cpu)
|
||||
{
|
||||
struct cfs_rq *cfs_rq;
|
||||
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
print_cfs_rq(m, cpu, &cpu_rq(cpu)->cfs);
|
||||
#endif
|
||||
rcu_read_lock();
|
||||
for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
|
||||
print_cfs_rq(m, cpu, cfs_rq);
|
||||
|
||||
seq_printf(m, "\nWeight tree:\n");
|
||||
print_cfs_rq_tasks(m, &cpu_rq(cpu)->cfs, 1);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
#endif
|
||||
|
10  kernel/sched_features.h  (regular file)
@@ -0,0 +1,10 @@
SCHED_FEAT(NEW_FAIR_SLEEPERS, 1)
SCHED_FEAT(WAKEUP_PREEMPT, 1)
SCHED_FEAT(START_DEBIT, 1)
SCHED_FEAT(AFFINE_WAKEUPS, 1)
SCHED_FEAT(CACHE_HOT_BUDDY, 1)
SCHED_FEAT(SYNC_WAKEUPS, 1)
SCHED_FEAT(HRTICK, 1)
SCHED_FEAT(DOUBLE_TICK, 0)
SCHED_FEAT(NORMALIZED_SLEEPER, 1)
SCHED_FEAT(DEADLINE, 1)
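The new sched_features.h is a pure list of SCHED_FEAT(name, default) entries: the scheduler core includes it more than once with different definitions of SCHED_FEAT, building an enum of feature indices and a default bitmask from the same list. A generic userspace sketch of that X-macro technique, not taken from the kernel sources:

#include <stdio.h>

/* the feature list, analogous to sched_features.h */
#define FEATURE_LIST \
        FEAT(NEW_FAIR_SLEEPERS, 1) \
        FEAT(WAKEUP_PREEMPT, 1) \
        FEAT(DOUBLE_TICK, 0)

/* first expansion: one enum index per feature */
enum {
#define FEAT(name, enabled) FEAT_##name,
        FEATURE_LIST
#undef FEAT
        NR_FEATURES
};

/* second expansion: the default on/off mask */
static const unsigned int default_features =
#define FEAT(name, enabled) ((enabled) << FEAT_##name) |
        FEATURE_LIST
#undef FEAT
        0;

int main(void)
{
        printf("%d features, default mask 0x%x\n", NR_FEATURES, default_features);
        return 0;
}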
@@ -62,7 +62,12 @@ static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
|
||||
if (!rt_rq->tg)
|
||||
return RUNTIME_INF;
|
||||
|
||||
return rt_rq->tg->rt_runtime;
|
||||
return rt_rq->rt_runtime;
|
||||
}
|
||||
|
||||
static inline u64 sched_rt_period(struct rt_rq *rt_rq)
|
||||
{
|
||||
return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
|
||||
}
|
||||
|
||||
#define for_each_leaf_rt_rq(rt_rq, rq) \
|
||||
@@ -127,14 +132,39 @@ static int rt_se_boosted(struct sched_rt_entity *rt_se)
|
||||
return p->prio != p->normal_prio;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
static inline cpumask_t sched_rt_period_mask(void)
|
||||
{
|
||||
return cpu_rq(smp_processor_id())->rd->span;
|
||||
}
|
||||
#else
|
||||
static inline cpumask_t sched_rt_period_mask(void)
|
||||
{
|
||||
return cpu_online_map;
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline
|
||||
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
|
||||
{
|
||||
return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
|
||||
}
|
||||
|
||||
static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
|
||||
{
|
||||
return &rt_rq->tg->rt_bandwidth;
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
|
||||
{
|
||||
if (sysctl_sched_rt_runtime == -1)
|
||||
return RUNTIME_INF;
|
||||
return rt_rq->rt_runtime;
|
||||
}
|
||||
|
||||
return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
|
||||
static inline u64 sched_rt_period(struct rt_rq *rt_rq)
|
||||
{
|
||||
return ktime_to_ns(def_rt_bandwidth.rt_period);
|
||||
}
|
||||
|
||||
#define for_each_leaf_rt_rq(rt_rq, rq) \
|
||||
@@ -173,6 +203,102 @@ static inline int rt_rq_throttled(struct rt_rq *rt_rq)
|
||||
{
|
||||
return rt_rq->rt_throttled;
|
||||
}
|
||||
|
||||
static inline cpumask_t sched_rt_period_mask(void)
|
||||
{
|
||||
return cpu_online_map;
|
||||
}
|
||||
|
||||
static inline
|
||||
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
|
||||
{
|
||||
return &cpu_rq(cpu)->rt;
|
||||
}
|
||||
|
||||
static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
|
||||
{
|
||||
return &def_rt_bandwidth;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
|
||||
{
|
||||
int i, idle = 1;
|
||||
cpumask_t span;
|
||||
|
||||
if (rt_b->rt_runtime == RUNTIME_INF)
|
||||
return 1;
|
||||
|
||||
span = sched_rt_period_mask();
|
||||
for_each_cpu_mask(i, span) {
|
||||
int enqueue = 0;
|
||||
struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
|
||||
struct rq *rq = rq_of_rt_rq(rt_rq);
|
||||
|
||||
spin_lock(&rq->lock);
|
||||
if (rt_rq->rt_time) {
|
||||
u64 runtime;
|
||||
|
||||
spin_lock(&rt_rq->rt_runtime_lock);
|
||||
runtime = rt_rq->rt_runtime;
|
||||
rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
|
||||
if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
|
||||
rt_rq->rt_throttled = 0;
|
||||
enqueue = 1;
|
||||
}
|
||||
if (rt_rq->rt_time || rt_rq->rt_nr_running)
|
||||
idle = 0;
|
||||
spin_unlock(&rt_rq->rt_runtime_lock);
|
||||
}
|
||||
|
||||
if (enqueue)
|
||||
sched_rt_rq_enqueue(rt_rq);
|
||||
spin_unlock(&rq->lock);
|
||||
}
|
||||
|
||||
return idle;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
static int balance_runtime(struct rt_rq *rt_rq)
|
||||
{
|
||||
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
|
||||
struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
|
||||
int i, weight, more = 0;
|
||||
u64 rt_period;
|
||||
|
||||
weight = cpus_weight(rd->span);
|
||||
|
||||
spin_lock(&rt_b->rt_runtime_lock);
|
||||
rt_period = ktime_to_ns(rt_b->rt_period);
|
||||
for_each_cpu_mask(i, rd->span) {
|
||||
struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
|
||||
s64 diff;
|
||||
|
||||
if (iter == rt_rq)
|
||||
continue;
|
||||
|
||||
spin_lock(&iter->rt_runtime_lock);
|
||||
diff = iter->rt_runtime - iter->rt_time;
|
||||
if (diff > 0) {
|
||||
do_div(diff, weight);
|
||||
if (rt_rq->rt_runtime + diff > rt_period)
|
||||
diff = rt_period - rt_rq->rt_runtime;
|
||||
iter->rt_runtime -= diff;
|
||||
rt_rq->rt_runtime += diff;
|
||||
more = 1;
|
||||
if (rt_rq->rt_runtime == rt_period) {
|
||||
spin_unlock(&iter->rt_runtime_lock);
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock(&iter->rt_runtime_lock);
|
||||
}
|
||||
spin_unlock(&rt_b->rt_runtime_lock);
|
||||
|
||||
return more;
|
||||
}
|
||||
#endif
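balance_runtime() above lets a throttled rt_rq borrow unused runtime from its siblings: each donor gives away at most its spare (rt_runtime - rt_time) divided by the number of CPUs in the domain, and the borrower never exceeds the full period. A standalone sketch of that arithmetic, not part of the commit; the numbers are made up:

#include <stdio.h>

#define NCPUS 4

int main(void)
{
        long long period = 1000;                                /* rt_period, arbitrary units */
        long long runtime[NCPUS] = { 950, 950, 950, 950 };      /* per-CPU rt_runtime */
        long long used[NCPUS]    = { 100, 200, 300, 940 };      /* per-CPU rt_time */
        int borrower = 3;                                       /* the CPU that ran out */

        for (int i = 0; i < NCPUS; i++) {
                if (i == borrower)
                        continue;
                long long spare = runtime[i] - used[i];
                if (spare <= 0)
                        continue;
                long long diff = spare / NCPUS;                 /* donate a fair share only */
                if (runtime[borrower] + diff > period)
                        diff = period - runtime[borrower];      /* never exceed the period */
                runtime[i] -= diff;
                runtime[borrower] += diff;
                if (runtime[borrower] == period)
                        break;
        }

        printf("borrower runtime is now %lld of period %lld\n",
               runtime[borrower], period);
        return 0;
}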
|
||||
|
||||
static inline int rt_se_prio(struct sched_rt_entity *rt_se)
|
||||
@@ -197,12 +323,24 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
|
||||
if (rt_rq->rt_throttled)
|
||||
return rt_rq_throttled(rt_rq);
|
||||
|
||||
if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
|
||||
return 0;
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
if (rt_rq->rt_time > runtime) {
|
||||
struct rq *rq = rq_of_rt_rq(rt_rq);
|
||||
int more;
|
||||
|
||||
rq->rt_throttled = 1;
|
||||
spin_unlock(&rt_rq->rt_runtime_lock);
|
||||
more = balance_runtime(rt_rq);
|
||||
spin_lock(&rt_rq->rt_runtime_lock);
|
||||
|
||||
if (more)
|
||||
runtime = sched_rt_runtime(rt_rq);
|
||||
}
|
||||
#endif
|
||||
|
||||
if (rt_rq->rt_time > runtime) {
|
||||
rt_rq->rt_throttled = 1;
|
||||
|
||||
if (rt_rq_throttled(rt_rq)) {
|
||||
sched_rt_rq_dequeue(rt_rq);
|
||||
return 1;
|
||||
@@ -212,29 +350,6 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void update_sched_rt_period(struct rq *rq)
|
||||
{
|
||||
struct rt_rq *rt_rq;
|
||||
u64 period;
|
||||
|
||||
while (rq->clock > rq->rt_period_expire) {
|
||||
period = (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
|
||||
rq->rt_period_expire += period;
|
||||
|
||||
for_each_leaf_rt_rq(rt_rq, rq) {
|
||||
u64 runtime = sched_rt_runtime(rt_rq);
|
||||
|
||||
rt_rq->rt_time -= min(rt_rq->rt_time, runtime);
|
||||
if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
|
||||
rt_rq->rt_throttled = 0;
|
||||
sched_rt_rq_enqueue(rt_rq);
|
||||
}
|
||||
}
|
||||
|
||||
rq->rt_throttled = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Update the current task's runtime statistics. Skip current tasks that
|
||||
* are not in our scheduling class.
|
||||
@@ -259,9 +374,15 @@ static void update_curr_rt(struct rq *rq)
|
||||
curr->se.exec_start = rq->clock;
|
||||
cpuacct_charge(curr, delta_exec);
|
||||
|
||||
rt_rq->rt_time += delta_exec;
|
||||
if (sched_rt_runtime_exceeded(rt_rq))
|
||||
resched_task(curr);
|
||||
for_each_sched_rt_entity(rt_se) {
|
||||
rt_rq = rt_rq_of_se(rt_se);
|
||||
|
||||
spin_lock(&rt_rq->rt_runtime_lock);
|
||||
rt_rq->rt_time += delta_exec;
|
||||
if (sched_rt_runtime_exceeded(rt_rq))
|
||||
resched_task(curr);
|
||||
spin_unlock(&rt_rq->rt_runtime_lock);
|
||||
}
|
||||
}
|
||||
|
||||
static inline
|
||||
@@ -284,6 +405,11 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
|
||||
#ifdef CONFIG_RT_GROUP_SCHED
|
||||
if (rt_se_boosted(rt_se))
|
||||
rt_rq->rt_nr_boosted++;
|
||||
|
||||
if (rt_rq->tg)
|
||||
start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
|
||||
#else
|
||||
start_rt_bandwidth(&def_rt_bandwidth);
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -353,27 +479,21 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
|
||||
/*
|
||||
* Because the prio of an upper entry depends on the lower
|
||||
* entries, we must remove entries top - down.
|
||||
*
|
||||
* XXX: O(1/2 h^2) because we can only walk up, not down the chain.
|
||||
* doesn't matter much for now, as h=2 for GROUP_SCHED.
|
||||
*/
|
||||
static void dequeue_rt_stack(struct task_struct *p)
|
||||
{
|
||||
struct sched_rt_entity *rt_se, *top_se;
|
||||
struct sched_rt_entity *rt_se, *back = NULL;
|
||||
|
||||
/*
|
||||
* dequeue all, top - down.
|
||||
*/
|
||||
do {
|
||||
rt_se = &p->rt;
|
||||
top_se = NULL;
|
||||
for_each_sched_rt_entity(rt_se) {
|
||||
if (on_rt_rq(rt_se))
|
||||
top_se = rt_se;
|
||||
}
|
||||
if (top_se)
|
||||
dequeue_rt_entity(top_se);
|
||||
} while (top_se);
|
||||
rt_se = &p->rt;
|
||||
for_each_sched_rt_entity(rt_se) {
|
||||
rt_se->back = back;
|
||||
back = rt_se;
|
||||
}
|
||||
|
||||
for (rt_se = back; rt_se; rt_se = rt_se->back) {
|
||||
if (on_rt_rq(rt_se))
|
||||
dequeue_rt_entity(rt_se);
|
||||
}
|
||||
}
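The new dequeue_rt_stack() makes one pass up the ->parent chain recording a ->back pointer, so a second pass can visit the same entities top-down, as the locking comment above requires. A userspace sketch of that two-pass reversal over a hypothetical node type, not part of the commit:

#include <stdio.h>
#include <stddef.h>

struct node {
        const char *name;
        struct node *parent;    /* walk-up link, like rt_se->parent */
        struct node *back;      /* filled in on the way up, points toward the leaf */
};

static void visit_top_down(struct node *leaf)
{
        struct node *n, *back = NULL;

        /* pass 1: bottom-up, remember who we came from */
        for (n = leaf; n; n = n->parent) {
                n->back = back;
                back = n;
        }

        /* pass 2: 'back' is now the topmost node; follow ->back downwards */
        for (n = back; n; n = n->back)
                printf("%s\n", n->name);
}

int main(void)
{
        struct node root  = { "root",  NULL,   NULL };
        struct node group = { "group", &root,  NULL };
        struct node task  = { "task",  &group, NULL };

        visit_top_down(&task);  /* prints root, group, task */
        return 0;
}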
|
||||
|
||||
/*
|
||||
@@ -393,6 +513,8 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
|
||||
*/
|
||||
for_each_sched_rt_entity(rt_se)
|
||||
enqueue_rt_entity(rt_se);
|
||||
|
||||
inc_cpu_load(rq, p->se.load.weight);
|
||||
}
|
||||
|
||||
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
|
||||
@@ -412,6 +534,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
|
||||
if (rt_rq && rt_rq->rt_nr_running)
|
||||
enqueue_rt_entity(rt_se);
|
||||
}
|
||||
|
||||
dec_cpu_load(rq, p->se.load.weight);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1001,7 +1125,8 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
|
||||
static void set_cpus_allowed_rt(struct task_struct *p,
|
||||
const cpumask_t *new_mask)
|
||||
{
|
||||
int weight = cpus_weight(*new_mask);
|
||||
|
||||
|
@@ -9,6 +9,11 @@
|
||||
static int show_schedstat(struct seq_file *seq, void *v)
|
||||
{
|
||||
int cpu;
|
||||
int mask_len = NR_CPUS/32 * 9;
|
||||
char *mask_str = kmalloc(mask_len, GFP_KERNEL);
|
||||
|
||||
if (mask_str == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
|
||||
seq_printf(seq, "timestamp %lu\n", jiffies);
|
||||
@@ -36,9 +41,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
|
||||
preempt_disable();
|
||||
for_each_domain(cpu, sd) {
|
||||
enum cpu_idle_type itype;
|
||||
char mask_str[NR_CPUS];
|
||||
|
||||
cpumask_scnprintf(mask_str, NR_CPUS, sd->span);
|
||||
cpumask_scnprintf(mask_str, mask_len, sd->span);
|
||||
seq_printf(seq, "domain%d %s", dcount++, mask_str);
|
||||
for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
|
||||
itype++) {
|
||||
|
264  kernel/semaphore.c  (regular file)
@@ -0,0 +1,264 @@
/*
 * Copyright (c) 2008 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * Distributed under the terms of the GNU GPL, version 2
 *
 * This file implements counting semaphores.
 * A counting semaphore may be acquired 'n' times before sleeping.
 * See mutex.c for single-acquisition sleeping locks which enforce
 * rules which allow code to be debugged more easily.
 */

/*
 * Some notes on the implementation:
 *
 * The spinlock controls access to the other members of the semaphore.
 * down_trylock() and up() can be called from interrupt context, so we
 * have to disable interrupts when taking the lock.  It turns out various
 * parts of the kernel expect to be able to use down() on a semaphore in
 * interrupt context when they know it will succeed, so we have to use
 * irqsave variants for down(), down_interruptible() and down_killable()
 * too.
 *
 * The ->count variable represents how many more tasks can acquire this
 * semaphore.  If it's zero, there may be tasks waiting on the wait_list.
 */

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>

static noinline void __down(struct semaphore *sem);
static noinline int __down_interruptible(struct semaphore *sem);
static noinline int __down_killable(struct semaphore *sem);
static noinline int __down_timeout(struct semaphore *sem, long jiffies);
static noinline void __up(struct semaphore *sem);

/**
 * down - acquire the semaphore
 * @sem: the semaphore to be acquired
 *
 * Acquires the semaphore.  If no more tasks are allowed to acquire the
 * semaphore, calling this function will put the task to sleep until the
 * semaphore is released.
 *
 * Use of this function is deprecated, please use down_interruptible() or
 * down_killable() instead.
 */
void down(struct semaphore *sem)
{
        unsigned long flags;

        spin_lock_irqsave(&sem->lock, flags);
        if (likely(sem->count > 0))
                sem->count--;
        else
                __down(sem);
        spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(down);

/**
 * down_interruptible - acquire the semaphore unless interrupted
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a signal, this function will return -EINTR.
 * If the semaphore is successfully acquired, this function returns 0.
 */
int down_interruptible(struct semaphore *sem)
{
        unsigned long flags;
        int result = 0;

        spin_lock_irqsave(&sem->lock, flags);
        if (likely(sem->count > 0))
                sem->count--;
        else
                result = __down_interruptible(sem);
        spin_unlock_irqrestore(&sem->lock, flags);

        return result;
}
EXPORT_SYMBOL(down_interruptible);

/**
 * down_killable - acquire the semaphore unless killed
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a fatal signal, this function will return
 * -EINTR.  If the semaphore is successfully acquired, this function returns
 * 0.
 */
int down_killable(struct semaphore *sem)
{
        unsigned long flags;
        int result = 0;

        spin_lock_irqsave(&sem->lock, flags);
        if (likely(sem->count > 0))
                sem->count--;
        else
                result = __down_killable(sem);
        spin_unlock_irqrestore(&sem->lock, flags);

        return result;
}
EXPORT_SYMBOL(down_killable);

/**
 * down_trylock - try to acquire the semaphore, without waiting
 * @sem: the semaphore to be acquired
 *
 * Try to acquire the semaphore atomically.  Returns 0 if the mutex has
 * been acquired successfully or 1 if it it cannot be acquired.
 *
 * NOTE: This return value is inverted from both spin_trylock and
 * mutex_trylock!  Be careful about this when converting code.
 *
 * Unlike mutex_trylock, this function can be used from interrupt context,
 * and the semaphore can be released by any task or interrupt.
 */
int down_trylock(struct semaphore *sem)
{
        unsigned long flags;
        int count;

        spin_lock_irqsave(&sem->lock, flags);
        count = sem->count - 1;
        if (likely(count >= 0))
                sem->count = count;
        spin_unlock_irqrestore(&sem->lock, flags);

        return (count < 0);
}
EXPORT_SYMBOL(down_trylock);

/**
 * down_timeout - acquire the semaphore within a specified time
 * @sem: the semaphore to be acquired
 * @jiffies: how long to wait before failing
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the semaphore is not released within the specified number of jiffies,
 * this function returns -ETIME.  It returns 0 if the semaphore was acquired.
 */
int down_timeout(struct semaphore *sem, long jiffies)
{
        unsigned long flags;
        int result = 0;

        spin_lock_irqsave(&sem->lock, flags);
        if (likely(sem->count > 0))
                sem->count--;
        else
                result = __down_timeout(sem, jiffies);
        spin_unlock_irqrestore(&sem->lock, flags);

        return result;
}
EXPORT_SYMBOL(down_timeout);

/**
 * up - release the semaphore
 * @sem: the semaphore to release
 *
 * Release the semaphore.  Unlike mutexes, up() may be called from any
 * context and even by tasks which have never called down().
 */
void up(struct semaphore *sem)
{
        unsigned long flags;

        spin_lock_irqsave(&sem->lock, flags);
        if (likely(list_empty(&sem->wait_list)))
                sem->count++;
        else
                __up(sem);
        spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(up);

/* Functions for the contended case */

struct semaphore_waiter {
        struct list_head list;
        struct task_struct *task;
        int up;
};

/*
 * Because this function is inlined, the 'state' parameter will be
 * constant, and thus optimised away by the compiler.  Likewise the
 * 'timeout' parameter for the cases without timeouts.
 */
static inline int __sched __down_common(struct semaphore *sem, long state,
                                                                long timeout)
{
        struct task_struct *task = current;
        struct semaphore_waiter waiter;

        list_add_tail(&waiter.list, &sem->wait_list);
        waiter.task = task;
        waiter.up = 0;

        for (;;) {
                if (state == TASK_INTERRUPTIBLE && signal_pending(task))
                        goto interrupted;
                if (state == TASK_KILLABLE && fatal_signal_pending(task))
                        goto interrupted;
                if (timeout <= 0)
                        goto timed_out;
                __set_task_state(task, state);
                spin_unlock_irq(&sem->lock);
                timeout = schedule_timeout(timeout);
                spin_lock_irq(&sem->lock);
                if (waiter.up)
                        return 0;
        }

 timed_out:
        list_del(&waiter.list);
        return -ETIME;

 interrupted:
        list_del(&waiter.list);
        return -EINTR;
}

static noinline void __sched __down(struct semaphore *sem)
{
        __down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_interruptible(struct semaphore *sem)
{
        return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_killable(struct semaphore *sem)
{
        return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_timeout(struct semaphore *sem, long jiffies)
{
        return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies);
}

static noinline void __sched __up(struct semaphore *sem)
{
        struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
                                                struct semaphore_waiter, list);
        list_del(&waiter->list);
        waiter->up = 1;
        wake_up_process(waiter->task);
}
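A minimal sketch of how a driver might use the API added above, assuming an invented my_dev structure; it is not part of the commit. Note the inverted convention of down_trylock(), which returns 0 on success, and that down_interruptible() returns -EINTR when a signal arrives while sleeping.

#include <linux/semaphore.h>
#include <linux/errno.h>

struct my_dev {                         /* hypothetical device private data */
        struct semaphore sem;
};

static void my_dev_setup(struct my_dev *dev)
{
        sema_init(&dev->sem, 1);        /* binary use of a counting semaphore */
}

static int my_dev_write(struct my_dev *dev)
{
        if (down_interruptible(&dev->sem))
                return -ERESTARTSYS;    /* interrupted while waiting */
        /* ... touch the hardware ... */
        up(&dev->sem);
        return 0;
}

static int my_dev_poll(struct my_dev *dev)
{
        if (down_trylock(&dev->sem))
                return -EBUSY;          /* 1 means we did NOT get it */
        /* ... non-blocking path ... */
        up(&dev->sem);
        return 0;
}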
@@ -220,7 +220,7 @@ void flush_signals(struct task_struct *t)
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&t->sighand->siglock, flags);
|
||||
clear_tsk_thread_flag(t,TIF_SIGPENDING);
|
||||
clear_tsk_thread_flag(t, TIF_SIGPENDING);
|
||||
flush_sigqueue(&t->pending);
|
||||
flush_sigqueue(&t->signal->shared_pending);
|
||||
spin_unlock_irqrestore(&t->sighand->siglock, flags);
|
||||
@@ -424,7 +424,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
|
||||
}
|
||||
if (signr &&
|
||||
((info->si_code & __SI_MASK) == __SI_TIMER) &&
|
||||
info->si_sys_private){
|
||||
info->si_sys_private) {
|
||||
/*
|
||||
* Release the siglock to ensure proper locking order
|
||||
* of timer locks outside of siglocks. Note, we leave
|
||||
@@ -1757,6 +1757,45 @@ static int do_signal_stop(int signr)
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int ptrace_signal(int signr, siginfo_t *info,
|
||||
struct pt_regs *regs, void *cookie)
|
||||
{
|
||||
if (!(current->ptrace & PT_PTRACED))
|
||||
return signr;
|
||||
|
||||
ptrace_signal_deliver(regs, cookie);
|
||||
|
||||
/* Let the debugger run. */
|
||||
ptrace_stop(signr, 0, info);
|
||||
|
||||
/* We're back. Did the debugger cancel the sig? */
|
||||
signr = current->exit_code;
|
||||
if (signr == 0)
|
||||
return signr;
|
||||
|
||||
current->exit_code = 0;
|
||||
|
||||
/* Update the siginfo structure if the signal has
|
||||
changed. If the debugger wanted something
|
||||
specific in the siginfo structure then it should
|
||||
have updated *info via PTRACE_SETSIGINFO. */
|
||||
if (signr != info->si_signo) {
|
||||
info->si_signo = signr;
|
||||
info->si_errno = 0;
|
||||
info->si_code = SI_USER;
|
||||
info->si_pid = task_pid_vnr(current->parent);
|
||||
info->si_uid = current->parent->uid;
|
||||
}
|
||||
|
||||
/* If the (new) signal is now blocked, requeue it. */
|
||||
if (sigismember(¤t->blocked, signr)) {
|
||||
specific_send_sig_info(signr, info, current);
|
||||
signr = 0;
|
||||
}
|
||||
|
||||
return signr;
|
||||
}
|
||||
|
||||
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
|
||||
struct pt_regs *regs, void *cookie)
|
||||
{
|
||||
@@ -1785,36 +1824,10 @@ relock:
|
||||
if (!signr)
|
||||
break; /* will return 0 */
|
||||
|
||||
if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
|
||||
ptrace_signal_deliver(regs, cookie);
|
||||
|
||||
/* Let the debugger run. */
|
||||
ptrace_stop(signr, 0, info);
|
||||
|
||||
/* We're back. Did the debugger cancel the sig? */
|
||||
signr = current->exit_code;
|
||||
if (signr == 0)
|
||||
if (signr != SIGKILL) {
|
||||
signr = ptrace_signal(signr, info, regs, cookie);
|
||||
if (!signr)
|
||||
continue;
|
||||
|
||||
current->exit_code = 0;
|
||||
|
||||
/* Update the siginfo structure if the signal has
|
||||
changed. If the debugger wanted something
|
||||
specific in the siginfo structure then it should
|
||||
have updated *info via PTRACE_SETSIGINFO. */
|
||||
if (signr != info->si_signo) {
|
||||
info->si_signo = signr;
|
||||
info->si_errno = 0;
|
||||
info->si_code = SI_USER;
|
||||
info->si_pid = task_pid_vnr(current->parent);
|
||||
info->si_uid = current->parent->uid;
|
||||
}
|
||||
|
||||
/* If the (new) signal is now blocked, requeue it. */
|
||||
if (sigismember(¤t->blocked, signr)) {
|
||||
specific_send_sig_info(signr, info, current);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
ka = ¤t->sighand->action[signr-1];
|
||||
|
@@ -356,7 +356,8 @@ void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
|
||||
/* Tasklets */
|
||||
struct tasklet_head
|
||||
{
|
||||
struct tasklet_struct *list;
|
||||
struct tasklet_struct *head;
|
||||
struct tasklet_struct **tail;
|
||||
};
|
||||
|
||||
/* Some compilers disobey section attribute on statics when not
|
||||
@@ -369,8 +370,9 @@ void __tasklet_schedule(struct tasklet_struct *t)
|
||||
unsigned long flags;
|
||||
|
||||
local_irq_save(flags);
|
||||
t->next = __get_cpu_var(tasklet_vec).list;
|
||||
__get_cpu_var(tasklet_vec).list = t;
|
||||
t->next = NULL;
|
||||
*__get_cpu_var(tasklet_vec).tail = t;
|
||||
__get_cpu_var(tasklet_vec).tail = &(t->next);
|
||||
raise_softirq_irqoff(TASKLET_SOFTIRQ);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
@@ -382,8 +384,9 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
|
||||
unsigned long flags;
|
||||
|
||||
local_irq_save(flags);
|
||||
t->next = __get_cpu_var(tasklet_hi_vec).list;
|
||||
__get_cpu_var(tasklet_hi_vec).list = t;
|
||||
t->next = NULL;
|
||||
*__get_cpu_var(tasklet_hi_vec).tail = t;
|
||||
__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
|
||||
raise_softirq_irqoff(HI_SOFTIRQ);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
@@ -395,8 +398,9 @@ static void tasklet_action(struct softirq_action *a)
|
||||
struct tasklet_struct *list;
|
||||
|
||||
local_irq_disable();
|
||||
list = __get_cpu_var(tasklet_vec).list;
|
||||
__get_cpu_var(tasklet_vec).list = NULL;
|
||||
list = __get_cpu_var(tasklet_vec).head;
|
||||
__get_cpu_var(tasklet_vec).head = NULL;
|
||||
__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
|
||||
local_irq_enable();
|
||||
|
||||
while (list) {
|
||||
@@ -416,8 +420,9 @@ static void tasklet_action(struct softirq_action *a)
|
||||
}
|
||||
|
||||
local_irq_disable();
|
||||
t->next = __get_cpu_var(tasklet_vec).list;
|
||||
__get_cpu_var(tasklet_vec).list = t;
|
||||
t->next = NULL;
|
||||
*__get_cpu_var(tasklet_vec).tail = t;
|
||||
__get_cpu_var(tasklet_vec).tail = &(t->next);
|
||||
__raise_softirq_irqoff(TASKLET_SOFTIRQ);
|
||||
local_irq_enable();
|
||||
}
|
||||
@@ -428,8 +433,9 @@ static void tasklet_hi_action(struct softirq_action *a)
|
||||
struct tasklet_struct *list;
|
||||
|
||||
local_irq_disable();
|
||||
list = __get_cpu_var(tasklet_hi_vec).list;
|
||||
__get_cpu_var(tasklet_hi_vec).list = NULL;
|
||||
list = __get_cpu_var(tasklet_hi_vec).head;
|
||||
__get_cpu_var(tasklet_hi_vec).head = NULL;
|
||||
__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
|
||||
local_irq_enable();
|
||||
|
||||
while (list) {
|
||||
@@ -449,8 +455,9 @@ static void tasklet_hi_action(struct softirq_action *a)
|
||||
}
|
||||
|
||||
local_irq_disable();
|
||||
t->next = __get_cpu_var(tasklet_hi_vec).list;
|
||||
__get_cpu_var(tasklet_hi_vec).list = t;
|
||||
t->next = NULL;
|
||||
*__get_cpu_var(tasklet_hi_vec).tail = t;
|
||||
__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
|
||||
__raise_softirq_irqoff(HI_SOFTIRQ);
|
||||
local_irq_enable();
|
||||
}
|
||||
@@ -487,6 +494,15 @@ EXPORT_SYMBOL(tasklet_kill);
|
||||
|
||||
void __init softirq_init(void)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
per_cpu(tasklet_vec, cpu).tail =
|
||||
&per_cpu(tasklet_vec, cpu).head;
|
||||
per_cpu(tasklet_hi_vec, cpu).tail =
|
||||
&per_cpu(tasklet_hi_vec, cpu).head;
|
||||
}
|
||||
|
||||
open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
|
||||
open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
|
||||
}
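The tasklet changes above replace a head-only singly linked list with a head plus a tail pointer-to-pointer, so scheduling a tasklet appends in O(1) and a whole per-CPU list can be spliced onto another when a CPU goes down. A userspace sketch of that head/tail idiom, not part of the commit:

#include <stdio.h>
#include <stddef.h>

struct item {
        int val;
        struct item *next;
};

struct queue {
        struct item *head;
        struct item **tail;     /* points at head, or at the last item's next */
};

static void queue_init(struct queue *q)
{
        q->head = NULL;
        q->tail = &q->head;
}

/* O(1) append, mirroring __tasklet_schedule() */
static void queue_append(struct queue *q, struct item *it)
{
        it->next = NULL;
        *q->tail = it;
        q->tail = &it->next;
}

int main(void)
{
        struct queue q;
        struct item a = { 1, NULL }, b = { 2, NULL }, c = { 3, NULL };

        queue_init(&q);
        queue_append(&q, &a);
        queue_append(&q, &b);
        queue_append(&q, &c);

        for (struct item *it = q.head; it; it = it->next)
                printf("%d\n", it->val);        /* 1 2 3 */
        return 0;
}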
|
||||
@@ -555,9 +571,12 @@ void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
|
||||
return;
|
||||
|
||||
/* CPU is dead, so no lock needed. */
|
||||
for (i = &per_cpu(tasklet_vec, cpu).list; *i; i = &(*i)->next) {
|
||||
for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
|
||||
if (*i == t) {
|
||||
*i = t->next;
|
||||
/* If this was the tail element, move the tail ptr */
|
||||
if (*i == NULL)
|
||||
per_cpu(tasklet_vec, cpu).tail = i;
|
||||
return;
|
||||
}
|
||||
}
|
||||
@@ -566,20 +585,20 @@ void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
|
||||
|
||||
static void takeover_tasklets(unsigned int cpu)
|
||||
{
|
||||
struct tasklet_struct **i;
|
||||
|
||||
/* CPU is dead, so no lock needed. */
|
||||
local_irq_disable();
|
||||
|
||||
/* Find end, append list for that CPU. */
|
||||
for (i = &__get_cpu_var(tasklet_vec).list; *i; i = &(*i)->next);
|
||||
*i = per_cpu(tasklet_vec, cpu).list;
|
||||
per_cpu(tasklet_vec, cpu).list = NULL;
|
||||
*__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).head;
|
||||
__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
|
||||
per_cpu(tasklet_vec, cpu).head = NULL;
|
||||
per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
|
||||
raise_softirq_irqoff(TASKLET_SOFTIRQ);
|
||||
|
||||
for (i = &__get_cpu_var(tasklet_hi_vec).list; *i; i = &(*i)->next);
|
||||
*i = per_cpu(tasklet_hi_vec, cpu).list;
|
||||
per_cpu(tasklet_hi_vec, cpu).list = NULL;
|
||||
*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
|
||||
__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
|
||||
per_cpu(tasklet_hi_vec, cpu).head = NULL;
|
||||
per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
|
||||
raise_softirq_irqoff(HI_SOFTIRQ);
|
||||
|
||||
local_irq_enable();
|
||||
|
@@ -11,7 +11,6 @@
|
||||
#include <linux/interrupt.h>
|
||||
|
||||
#include <asm/atomic.h>
|
||||
#include <asm/semaphore.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
/* Since we effect priority and affinity (both of which are visible
|
||||
@@ -35,7 +34,7 @@ static int stopmachine(void *cpu)
|
||||
int irqs_disabled = 0;
|
||||
int prepared = 0;
|
||||
|
||||
set_cpus_allowed(current, cpumask_of_cpu((int)(long)cpu));
|
||||
set_cpus_allowed_ptr(current, &cpumask_of_cpu((int)(long)cpu));
|
||||
|
||||
/* Ack: we are alive */
|
||||
smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
|
||||
@@ -135,8 +134,7 @@ static void restart_machine(void)
|
||||
preempt_enable_no_resched();
|
||||
}
|
||||
|
||||
struct stop_machine_data
|
||||
{
|
||||
struct stop_machine_data {
|
||||
int (*fn)(void *);
|
||||
void *data;
|
||||
struct completion done;
|
||||
|
40  kernel/sys.c
@@ -67,6 +67,12 @@
|
||||
#ifndef SET_ENDIAN
|
||||
# define SET_ENDIAN(a,b) (-EINVAL)
|
||||
#endif
|
||||
#ifndef GET_TSC_CTL
|
||||
# define GET_TSC_CTL(a) (-EINVAL)
|
||||
#endif
|
||||
#ifndef SET_TSC_CTL
|
||||
# define SET_TSC_CTL(a) (-EINVAL)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* this is where the system-wide overflow UID and GID are defined, for
|
||||
@@ -1626,10 +1632,9 @@ asmlinkage long sys_umask(int mask)
|
||||
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
|
||||
unsigned long arg4, unsigned long arg5)
|
||||
{
|
||||
long error;
|
||||
long uninitialized_var(error);
|
||||
|
||||
error = security_task_prctl(option, arg2, arg3, arg4, arg5);
|
||||
if (error)
|
||||
if (security_task_prctl(option, arg2, arg3, arg4, arg5, &error))
|
||||
return error;
|
||||
|
||||
switch (option) {
|
||||
@@ -1682,17 +1687,6 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
|
||||
error = -EINVAL;
|
||||
break;
|
||||
|
||||
case PR_GET_KEEPCAPS:
|
||||
if (current->keep_capabilities)
|
||||
error = 1;
|
||||
break;
|
||||
case PR_SET_KEEPCAPS:
|
||||
if (arg2 != 0 && arg2 != 1) {
|
||||
error = -EINVAL;
|
||||
break;
|
||||
}
|
||||
current->keep_capabilities = arg2;
|
||||
break;
|
||||
case PR_SET_NAME: {
|
||||
struct task_struct *me = current;
|
||||
unsigned char ncomm[sizeof(me->comm)];
|
||||
@@ -1726,18 +1720,12 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
|
||||
case PR_SET_SECCOMP:
|
||||
error = prctl_set_seccomp(arg2);
|
||||
break;
|
||||
|
||||
case PR_CAPBSET_READ:
|
||||
if (!cap_valid(arg2))
|
||||
return -EINVAL;
|
||||
return !!cap_raised(current->cap_bset, arg2);
|
||||
case PR_CAPBSET_DROP:
|
||||
#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
|
||||
return cap_prctl_drop(arg2);
|
||||
#else
|
||||
return -EINVAL;
|
||||
#endif
|
||||
|
||||
case PR_GET_TSC:
|
||||
error = GET_TSC_CTL(arg2);
|
||||
break;
|
||||
case PR_SET_TSC:
|
||||
error = SET_TSC_CTL(arg2);
|
||||
break;
|
||||
default:
|
||||
error = -EINVAL;
|
||||
break;
|
||||
|
@@ -268,17 +268,6 @@ static struct ctl_table kern_table[] = {
|
||||
.extra1 = &min_wakeup_granularity_ns,
|
||||
.extra2 = &max_wakeup_granularity_ns,
|
||||
},
|
||||
{
|
||||
.ctl_name = CTL_UNNUMBERED,
|
||||
.procname = "sched_batch_wakeup_granularity_ns",
|
||||
.data = &sysctl_sched_batch_wakeup_granularity,
|
||||
.maxlen = sizeof(unsigned int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec_minmax,
|
||||
.strategy = &sysctl_intvec,
|
||||
.extra1 = &min_wakeup_granularity_ns,
|
||||
.extra2 = &max_wakeup_granularity_ns,
|
||||
},
|
||||
{
|
||||
.ctl_name = CTL_UNNUMBERED,
|
||||
.procname = "sched_child_runs_first",
|
||||
@@ -318,7 +307,7 @@ static struct ctl_table kern_table[] = {
|
||||
.data = &sysctl_sched_rt_period,
|
||||
.maxlen = sizeof(unsigned int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec,
|
||||
.proc_handler = &sched_rt_handler,
|
||||
},
|
||||
{
|
||||
.ctl_name = CTL_UNNUMBERED,
|
||||
@@ -326,7 +315,7 @@ static struct ctl_table kern_table[] = {
|
||||
.data = &sysctl_sched_rt_runtime,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec,
|
||||
.proc_handler = &sched_rt_handler,
|
||||
},
|
||||
{
|
||||
.ctl_name = CTL_UNNUMBERED,
|
||||
|
@@ -379,6 +379,7 @@ void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec)
|
||||
ts->tv_sec = sec;
|
||||
ts->tv_nsec = nsec;
|
||||
}
|
||||
EXPORT_SYMBOL(set_normalized_timespec);
|
||||
|
||||
/**
|
||||
* ns_to_timespec - Convert nanoseconds to timespec
|
||||
|
@@ -141,8 +141,16 @@ static void clocksource_watchdog(unsigned long data)
|
||||
}
|
||||
|
||||
if (!list_empty(&watchdog_list)) {
|
||||
__mod_timer(&watchdog_timer,
|
||||
watchdog_timer.expires + WATCHDOG_INTERVAL);
|
||||
/*
|
||||
* Cycle through CPUs to check if the CPUs stay
|
||||
* synchronized to each other.
|
||||
*/
|
||||
int next_cpu = next_cpu(raw_smp_processor_id(), cpu_online_map);
|
||||
|
||||
if (next_cpu >= NR_CPUS)
|
||||
next_cpu = first_cpu(cpu_online_map);
|
||||
watchdog_timer.expires += WATCHDOG_INTERVAL;
|
||||
add_timer_on(&watchdog_timer, next_cpu);
|
||||
}
|
||||
spin_unlock(&watchdog_lock);
|
||||
}
|
||||
@@ -164,7 +172,8 @@ static void clocksource_check_watchdog(struct clocksource *cs)
|
||||
if (!started && watchdog) {
|
||||
watchdog_last = watchdog->read();
|
||||
watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
|
||||
add_timer(&watchdog_timer);
|
||||
add_timer_on(&watchdog_timer,
|
||||
first_cpu(cpu_online_map));
|
||||
}
|
||||
} else {
|
||||
if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
|
||||
@@ -185,7 +194,8 @@ static void clocksource_check_watchdog(struct clocksource *cs)
|
||||
watchdog_last = watchdog->read();
|
||||
watchdog_timer.expires =
|
||||
jiffies + WATCHDOG_INTERVAL;
|
||||
add_timer(&watchdog_timer);
|
||||
add_timer_on(&watchdog_timer,
|
||||
first_cpu(cpu_online_map));
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -221,6 +231,18 @@ void clocksource_resume(void)
|
||||
spin_unlock_irqrestore(&clocksource_lock, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
* clocksource_touch_watchdog - Update watchdog
|
||||
*
|
||||
* Update the watchdog after exception contexts such as kgdb so as not
|
||||
* to incorrectly trip the watchdog.
|
||||
*
|
||||
*/
|
||||
void clocksource_touch_watchdog(void)
|
||||
{
|
||||
clocksource_resume_watchdog();
|
||||
}
|
||||
|
||||
/**
|
||||
* clocksource_get_next - Returns the selected clocksource
|
||||
*
|
||||
|
@@ -14,7 +14,7 @@
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/hrtimer.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/profile.h>
|
||||
#include <linux/sched.h>
|
||||
@@ -262,7 +262,7 @@ out:
|
||||
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
|
||||
{
|
||||
if (!cpu_isset(*oncpu, cpu_online_map))
|
||||
printk(KERN_ERR "tick-braodcast: ignoring broadcast for "
|
||||
printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
|
||||
"offline CPU #%d\n", *oncpu);
|
||||
else
|
||||
smp_call_function_single(*oncpu, tick_do_broadcast_on_off,
|
||||
|
@@ -14,12 +14,14 @@
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/hrtimer.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/profile.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/tick.h>
|
||||
|
||||
#include <asm/irq_regs.h>
|
||||
|
||||
#include "tick-internal.h"
|
||||
|
||||
/*
|
||||
|
@@ -14,7 +14,7 @@
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/hrtimer.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/profile.h>
|
||||
#include <linux/sched.h>
|
||||
|
@@ -158,9 +158,8 @@ void tick_nohz_stop_idle(int cpu)
|
||||
}
|
||||
}
|
||||
|
||||
static ktime_t tick_nohz_start_idle(int cpu)
|
||||
static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
|
||||
{
|
||||
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
|
||||
ktime_t now, delta;
|
||||
|
||||
now = ktime_get();
|
||||
@@ -192,7 +191,6 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
|
||||
void tick_nohz_stop_sched_tick(void)
|
||||
{
|
||||
unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
|
||||
unsigned long rt_jiffies;
|
||||
struct tick_sched *ts;
|
||||
ktime_t last_update, expires, now;
|
||||
struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
|
||||
@@ -201,8 +199,8 @@ void tick_nohz_stop_sched_tick(void)
|
||||
local_irq_save(flags);
|
||||
|
||||
cpu = smp_processor_id();
|
||||
now = tick_nohz_start_idle(cpu);
|
||||
ts = &per_cpu(tick_cpu_sched, cpu);
|
||||
now = tick_nohz_start_idle(ts);
|
||||
|
||||
/*
|
||||
* If this cpu is offline and it is the one which updates
|
||||
@@ -222,7 +220,6 @@ void tick_nohz_stop_sched_tick(void)
|
||||
if (need_resched())
|
||||
goto end;
|
||||
|
||||
cpu = smp_processor_id();
|
||||
if (unlikely(local_softirq_pending())) {
|
||||
static int ratelimit;
|
||||
|
||||
@@ -245,10 +242,6 @@ void tick_nohz_stop_sched_tick(void)
|
||||
next_jiffies = get_next_timer_interrupt(last_jiffies);
|
||||
delta_jiffies = next_jiffies - last_jiffies;
|
||||
|
||||
rt_jiffies = rt_needs_cpu(cpu);
|
||||
if (rt_jiffies && rt_jiffies < delta_jiffies)
|
||||
delta_jiffies = rt_jiffies;
|
||||
|
||||
if (rcu_needs_cpu(cpu))
|
||||
delta_jiffies = 1;
|
||||
/*
|
||||
@@ -400,6 +393,7 @@ void tick_nohz_restart_sched_tick(void)
|
||||
sub_preempt_count(HARDIRQ_OFFSET);
|
||||
}
|
||||
|
||||
touch_softlockup_watchdog();
|
||||
/*
|
||||
* Cancel the scheduled timer and restore the tick
|
||||
*/
|
||||
|
@@ -178,6 +178,7 @@ static void change_clocksource(void)
|
||||
if (clock == new)
|
||||
return;
|
||||
|
||||
new->cycle_last = 0;
|
||||
now = clocksource_read(new);
|
||||
nsec = __get_nsec_offset();
|
||||
timespec_add_ns(&xtime, nsec);
|
||||
@@ -295,6 +296,7 @@ static int timekeeping_resume(struct sys_device *dev)
|
||||
timespec_add_ns(&xtime, timekeeping_suspend_nsecs);
|
||||
update_xtime_cache(0);
|
||||
/* re-base the last cycle value */
|
||||
clock->cycle_last = 0;
|
||||
clock->cycle_last = clocksource_read(clock);
|
||||
clock->error = 0;
|
||||
timekeeping_suspended = 0;
|
||||
|
@@ -1228,13 +1228,6 @@ asmlinkage long sys_sysinfo(struct sysinfo __user *info)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* lockdep: we want to track each per-CPU base as a separate lock-class,
|
||||
* but timer-bases are kmalloc()-ed, so we need to attach separate
|
||||
* keys to them:
|
||||
*/
|
||||
static struct lock_class_key base_lock_keys[NR_CPUS];
|
||||
|
||||
static int __cpuinit init_timers_cpu(int cpu)
|
||||
{
|
||||
int j;
|
||||
@@ -1277,7 +1270,6 @@ static int __cpuinit init_timers_cpu(int cpu)
|
||||
}
|
||||
|
||||
spin_lock_init(&base->lock);
|
||||
lockdep_set_class(&base->lock, base_lock_keys + cpu);
|
||||
|
||||
for (j = 0; j < TVN_SIZE; j++) {
|
||||
INIT_LIST_HEAD(base->tv5.vec + j);
|
||||
@@ -1316,8 +1308,8 @@ static void __cpuinit migrate_timers(int cpu)
|
||||
new_base = get_cpu_var(tvec_bases);
|
||||
|
||||
local_irq_disable();
|
||||
double_spin_lock(&new_base->lock, &old_base->lock,
|
||||
smp_processor_id() < cpu);
|
||||
spin_lock(&new_base->lock);
|
||||
spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
|
||||
|
||||
BUG_ON(old_base->running_timer);
|
||||
|
||||
@@ -1330,8 +1322,8 @@ static void __cpuinit migrate_timers(int cpu)
|
||||
migrate_timer_list(new_base, old_base->tv5.vec + i);
|
||||
}
|
||||
|
||||
double_spin_unlock(&new_base->lock, &old_base->lock,
|
||||
smp_processor_id() < cpu);
|
||||
spin_unlock(&old_base->lock);
|
||||
spin_unlock(&new_base->lock);
|
||||
local_irq_enable();
|
||||
put_cpu_var(tvec_bases);
|
||||
}
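migrate_timers() now takes the two per-CPU base locks with spin_lock() plus spin_lock_nested(SINGLE_DEPTH_NESTING) instead of double_spin_lock(), which is also what lets the per-base lockdep keys above go away. A hedged sketch of that idiom for two locks of the same lock class; the structure and function names are invented and this is not part of the commit:

#include <linux/spinlock.h>

struct my_base {                        /* hypothetical per-CPU structure */
        spinlock_t lock;
};

static void migrate_between(struct my_base *new_base, struct my_base *old_base)
{
        /*
         * Both locks belong to the same lock class, so tell lockdep that
         * the second acquisition is an intentional one-level nesting
         * rather than a potential deadlock.
         */
        spin_lock(&new_base->lock);
        spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

        /* ... move pending work from old_base to new_base ... */

        spin_unlock(&old_base->lock);
        spin_unlock(&new_base->lock);
}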
|
||||
|
@@ -101,7 +101,7 @@ static int sched_create_user(struct user_struct *up)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
up->tg = sched_create_group();
|
||||
up->tg = sched_create_group(&root_task_group);
|
||||
if (IS_ERR(up->tg))
|
||||
rc = -ENOMEM;
|
||||
|
||||
@@ -193,6 +193,33 @@ static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
|
||||
|
||||
static struct kobj_attribute cpu_rt_runtime_attr =
|
||||
__ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);
|
||||
|
||||
static ssize_t cpu_rt_period_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct user_struct *up = container_of(kobj, struct user_struct, kobj);
|
||||
|
||||
return sprintf(buf, "%lu\n", sched_group_rt_period(up->tg));
|
||||
}
|
||||
|
||||
static ssize_t cpu_rt_period_store(struct kobject *kobj,
|
||||
struct kobj_attribute *attr,
|
||||
const char *buf, size_t size)
|
||||
{
|
||||
struct user_struct *up = container_of(kobj, struct user_struct, kobj);
|
||||
unsigned long rt_period;
|
||||
int rc;
|
||||
|
||||
sscanf(buf, "%lu", &rt_period);
|
||||
|
||||
rc = sched_group_set_rt_period(up->tg, rt_period);
|
||||
|
||||
return (rc ? rc : size);
|
||||
}
|
||||
|
||||
static struct kobj_attribute cpu_rt_period_attr =
|
||||
__ATTR(cpu_rt_period, 0644, cpu_rt_period_show, cpu_rt_period_store);
|
||||
#endif
|
||||
|
||||
/* default attributes per uid directory */
|
||||
@@ -202,6 +229,7 @@ static struct attribute *uids_attributes[] = {
|
||||
#endif
|
||||
#ifdef CONFIG_RT_GROUP_SCHED
|
||||
&cpu_rt_runtime_attr.attr,
|
||||
&cpu_rt_period_attr.attr,
|
||||
#endif
|
||||
NULL
|
||||
};
|
||||
|
@@ -219,6 +219,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
|
||||
struct timer_list *timer = &dwork->timer;
|
||||
struct work_struct *work = &dwork->work;
|
||||
|
||||
timer_stats_timer_set_start_info(&dwork->timer);
|
||||
if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
|
||||
BUG_ON(timer_pending(timer));
|
||||
BUG_ON(!list_empty(&work->entry));
|
||||
@@ -580,6 +581,7 @@ EXPORT_SYMBOL(schedule_delayed_work);
|
||||
int schedule_delayed_work_on(int cpu,
|
||||
struct delayed_work *dwork, unsigned long delay)
|
||||
{
|
||||
timer_stats_timer_set_start_info(&dwork->timer);
|
||||
return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
|
||||
}
|
||||
EXPORT_SYMBOL(schedule_delayed_work_on);
|
||||
|