Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/geneve.c

Here we had an overlapping change, where in 'net' the extraneous stats
bump was being removed whilst in 'net-next' the final argument to
udp_tunnel6_xmit_skb() was being changed.

Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -97,6 +97,12 @@ static DEFINE_SPINLOCK(css_set_lock);
*/
static DEFINE_SPINLOCK(cgroup_idr_lock);

/*
* Protects cgroup_file->kn for !self csses. It synchronizes notifications
* against file removal/re-creation across css hiding.
*/
static DEFINE_SPINLOCK(cgroup_file_kn_lock);

/*
* Protects cgroup_subsys->release_agent_path. Modifying it also requires
* cgroup_mutex. Reading requires either cgroup_mutex or this spinlock.
@@ -730,9 +736,11 @@ static void put_css_set_locked(struct css_set *cset)
if (!atomic_dec_and_test(&cset->refcount))
return;

/* This css_set is dead. unlink it and release cgroup refcounts */
for_each_subsys(ss, ssid)
/* This css_set is dead. unlink it and release cgroup and css refs */
for_each_subsys(ss, ssid) {
list_del(&cset->e_cset_node[ssid]);
css_put(cset->subsys[ssid]);
}
hash_del(&cset->hlist);
css_set_count--;
@@ -1032,9 +1040,13 @@ static struct css_set *find_css_set(struct css_set *old_cset,
key = css_set_hash(cset->subsys);
hash_add(css_set_table, &cset->hlist, key);

for_each_subsys(ss, ssid)
for_each_subsys(ss, ssid) {
struct cgroup_subsys_state *css = cset->subsys[ssid];

list_add_tail(&cset->e_cset_node[ssid],
&cset->subsys[ssid]->cgroup->e_csets[ssid]);
&css->cgroup->e_csets[ssid]);
css_get(css);
}

spin_unlock_bh(&css_set_lock);
@@ -1369,6 +1381,16 @@ static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
char name[CGROUP_FILE_NAME_MAX];

lockdep_assert_held(&cgroup_mutex);

if (cft->file_offset) {
struct cgroup_subsys_state *css = cgroup_css(cgrp, cft->ss);
struct cgroup_file *cfile = (void *)css + cft->file_offset;

spin_lock_irq(&cgroup_file_kn_lock);
cfile->kn = NULL;
spin_unlock_irq(&cgroup_file_kn_lock);
}

kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
}
@@ -1832,7 +1854,6 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp)

INIT_LIST_HEAD(&cgrp->self.sibling);
INIT_LIST_HEAD(&cgrp->self.children);
INIT_LIST_HEAD(&cgrp->self.files);
INIT_LIST_HEAD(&cgrp->cset_links);
INIT_LIST_HEAD(&cgrp->pidlists);
mutex_init(&cgrp->pidlist_mutex);
@@ -2193,6 +2214,9 @@ struct cgroup_taskset {
struct list_head src_csets;
struct list_head dst_csets;

/* the subsys currently being processed */
int ssid;

/*
* Fields for cgroup_taskset_*() iteration.
*
@@ -2255,25 +2279,29 @@ static void cgroup_taskset_add(struct task_struct *task,
/**
* cgroup_taskset_first - reset taskset and return the first task
* @tset: taskset of interest
* @dst_cssp: output variable for the destination css
*
* @tset iteration is initialized and the first task is returned.
*/
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset)
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
struct cgroup_subsys_state **dst_cssp)
{
tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node);
tset->cur_task = NULL;

return cgroup_taskset_next(tset);
return cgroup_taskset_next(tset, dst_cssp);
}

/**
* cgroup_taskset_next - iterate to the next task in taskset
* @tset: taskset of interest
* @dst_cssp: output variable for the destination css
*
* Return the next task in @tset. Iteration must have been initialized
* with cgroup_taskset_first().
*/
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
struct cgroup_subsys_state **dst_cssp)
{
struct css_set *cset = tset->cur_cset;
struct task_struct *task = tset->cur_task;
@@ -2288,6 +2316,18 @@ struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
if (&task->cg_list != &cset->mg_tasks) {
tset->cur_cset = cset;
tset->cur_task = task;

/*
* This function may be called both before and
* after cgroup_taskset_migrate(). The two cases
* can be distinguished by looking at whether @cset
* has its ->mg_dst_cset set.
*/
if (cset->mg_dst_cset)
*dst_cssp = cset->mg_dst_cset->subsys[tset->ssid];
else
*dst_cssp = cset->subsys[tset->ssid];

return task;
}
@@ -2323,7 +2363,8 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
/* check that we can legitimately attach to the cgroup */
for_each_e_css(css, i, dst_cgrp) {
if (css->ss->can_attach) {
ret = css->ss->can_attach(css, tset);
tset->ssid = i;
ret = css->ss->can_attach(tset);
if (ret) {
failed_css = css;
goto out_cancel_attach;
@@ -2356,9 +2397,12 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
*/
tset->csets = &tset->dst_csets;

for_each_e_css(css, i, dst_cgrp)
if (css->ss->attach)
css->ss->attach(css, tset);
for_each_e_css(css, i, dst_cgrp) {
if (css->ss->attach) {
tset->ssid = i;
css->ss->attach(tset);
}
}

ret = 0;
goto out_release_tset;
@@ -2367,8 +2411,10 @@ out_cancel_attach:
for_each_e_css(css, i, dst_cgrp) {
if (css == failed_css)
break;
if (css->ss->cancel_attach)
css->ss->cancel_attach(css, tset);
if (css->ss->cancel_attach) {
tset->ssid = i;
css->ss->cancel_attach(tset);
}
}
out_release_tset:
spin_lock_bh(&css_set_lock);
@@ -3290,9 +3336,9 @@ static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
if (cft->file_offset) {
struct cgroup_file *cfile = (void *)css + cft->file_offset;

kernfs_get(kn);
spin_lock_irq(&cgroup_file_kn_lock);
cfile->kn = kn;
list_add(&cfile->node, &css->files);
spin_unlock_irq(&cgroup_file_kn_lock);
}

return 0;
@@ -3529,6 +3575,22 @@ int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
return cgroup_add_cftypes(ss, cfts);
}

/**
* cgroup_file_notify - generate a file modified event for a cgroup_file
* @cfile: target cgroup_file
*
* @cfile must have been obtained by setting cftype->file_offset.
*/
void cgroup_file_notify(struct cgroup_file *cfile)
{
unsigned long flags;

spin_lock_irqsave(&cgroup_file_kn_lock, flags);
if (cfile->kn)
kernfs_notify(cfile->kn);
spin_unlock_irqrestore(&cgroup_file_kn_lock, flags);
}

/**
* cgroup_task_count - count the number of tasks in a cgroup.
* @cgrp: the cgroup in question
@@ -4590,13 +4652,9 @@ static void css_free_work_fn(struct work_struct *work)
container_of(work, struct cgroup_subsys_state, destroy_work);
struct cgroup_subsys *ss = css->ss;
struct cgroup *cgrp = css->cgroup;
struct cgroup_file *cfile;

percpu_ref_exit(&css->refcnt);

list_for_each_entry(cfile, &css->files, node)
kernfs_put(cfile->kn);

if (ss) {
/* css free path */
int id = css->id;
@@ -4701,7 +4759,6 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
css->ss = ss;
INIT_LIST_HEAD(&css->sibling);
INIT_LIST_HEAD(&css->children);
INIT_LIST_HEAD(&css->files);
css->serial_nr = css_serial_nr_next++;

if (cgroup_parent(cgrp)) {
@@ -155,12 +155,10 @@ static void freezer_css_free(struct cgroup_subsys_state *css)
* @freezer->lock. freezer_attach() makes the new tasks conform to the
* current state and all following state changes can see the new tasks.
*/
static void freezer_attach(struct cgroup_subsys_state *new_css,
struct cgroup_taskset *tset)
static void freezer_attach(struct cgroup_taskset *tset)
{
struct freezer *freezer = css_freezer(new_css);
struct task_struct *task;
bool clear_frozen = false;
struct cgroup_subsys_state *new_css;

mutex_lock(&freezer_mutex);
@@ -174,22 +172,21 @@ static void freezer_attach(struct cgroup_subsys_state *new_css,
* current state before executing the following - !frozen tasks may
* be visible in a FROZEN cgroup and frozen tasks in a THAWED one.
*/
cgroup_taskset_for_each(task, tset) {
cgroup_taskset_for_each(task, new_css, tset) {
struct freezer *freezer = css_freezer(new_css);

if (!(freezer->state & CGROUP_FREEZING)) {
__thaw_task(task);
} else {
freeze_task(task);
freezer->state &= ~CGROUP_FROZEN;
clear_frozen = true;
/* clear FROZEN and propagate upwards */
while (freezer && (freezer->state & CGROUP_FROZEN)) {
freezer->state &= ~CGROUP_FROZEN;
freezer = parent_freezer(freezer);
}
}
}

/* propagate FROZEN clearing upwards */
while (clear_frozen && (freezer = parent_freezer(freezer))) {
freezer->state &= ~CGROUP_FROZEN;
clear_frozen = freezer->state & CGROUP_FREEZING;
}

mutex_unlock(&freezer_mutex);
}
@@ -106,7 +106,7 @@ static void pids_uncharge(struct pids_cgroup *pids, int num)
{
struct pids_cgroup *p;

for (p = pids; p; p = parent_pids(p))
for (p = pids; parent_pids(p); p = parent_pids(p))
pids_cancel(p, num);
}
@@ -123,7 +123,7 @@ static void pids_charge(struct pids_cgroup *pids, int num)
{
struct pids_cgroup *p;

for (p = pids; p; p = parent_pids(p))
for (p = pids; parent_pids(p); p = parent_pids(p))
atomic64_add(num, &p->counter);
}
@@ -140,7 +140,7 @@ static int pids_try_charge(struct pids_cgroup *pids, int num)
{
struct pids_cgroup *p, *q;

for (p = pids; p; p = parent_pids(p)) {
for (p = pids; parent_pids(p); p = parent_pids(p)) {
int64_t new = atomic64_add_return(num, &p->counter);

/*
@@ -162,13 +162,13 @@ revert:
return -EAGAIN;
}

static int pids_can_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
static int pids_can_attach(struct cgroup_taskset *tset)
{
struct pids_cgroup *pids = css_pids(css);
struct task_struct *task;
struct cgroup_subsys_state *dst_css;

cgroup_taskset_for_each(task, tset) {
cgroup_taskset_for_each(task, dst_css, tset) {
struct pids_cgroup *pids = css_pids(dst_css);
struct cgroup_subsys_state *old_css;
struct pids_cgroup *old_pids;
@@ -187,13 +187,13 @@ static int pids_can_attach(struct cgroup_subsys_state *css,
return 0;
}

static void pids_cancel_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
static void pids_cancel_attach(struct cgroup_taskset *tset)
{
struct pids_cgroup *pids = css_pids(css);
struct task_struct *task;
struct cgroup_subsys_state *dst_css;

cgroup_taskset_for_each(task, tset) {
cgroup_taskset_for_each(task, dst_css, tset) {
struct pids_cgroup *pids = css_pids(dst_css);
struct cgroup_subsys_state *old_css;
struct pids_cgroup *old_pids;
@@ -205,65 +205,28 @@ static void pids_cancel_attach(struct cgroup_subsys_state *css,
}
}

/*
* task_css_check(true) in pids_can_fork() and pids_cancel_fork() relies
* on threadgroup_change_begin() held by the copy_process().
*/
static int pids_can_fork(struct task_struct *task, void **priv_p)
{
struct cgroup_subsys_state *css;
struct pids_cgroup *pids;
int err;

/*
* Use the "current" task_css for the pids subsystem as the tentative
* css. It is possible we will charge the wrong hierarchy, in which
* case we will forcefully revert/reapply the charge on the right
* hierarchy after it is committed to the task proper.
*/
css = task_get_css(current, pids_cgrp_id);
css = task_css_check(current, pids_cgrp_id, true);
pids = css_pids(css);

err = pids_try_charge(pids, 1);
if (err)
goto err_css_put;

*priv_p = css;
return 0;

err_css_put:
css_put(css);
return err;
return pids_try_charge(pids, 1);
}

static void pids_cancel_fork(struct task_struct *task, void *priv)
{
struct cgroup_subsys_state *css = priv;
struct pids_cgroup *pids = css_pids(css);

pids_uncharge(pids, 1);
css_put(css);
}

static void pids_fork(struct task_struct *task, void *priv)
{
struct cgroup_subsys_state *css;
struct cgroup_subsys_state *old_css = priv;
struct pids_cgroup *pids;
struct pids_cgroup *old_pids = css_pids(old_css);

css = task_get_css(task, pids_cgrp_id);
css = task_css_check(current, pids_cgrp_id, true);
pids = css_pids(css);

/*
* If the association has changed, we have to revert and reapply the
* charge/uncharge on the wrong hierarchy to the current one. Since
* the association can only change due to an organisation event, its
* okay for us to ignore the limit in this case.
*/
if (pids != old_pids) {
pids_uncharge(old_pids, 1);
pids_charge(pids, 1);
}

css_put(css);
css_put(old_css);
pids_uncharge(pids, 1);
}

static void pids_free(struct task_struct *task)
@@ -335,6 +298,7 @@ static struct cftype pids_files[] = {
{
.name = "current",
.read_s64 = pids_current_read,
.flags = CFTYPE_NOT_ON_ROOT,
},
{ } /* terminate */
};
@@ -346,7 +310,6 @@ struct cgroup_subsys pids_cgrp_subsys = {
.cancel_attach = pids_cancel_attach,
.can_fork = pids_can_fork,
.cancel_fork = pids_cancel_fork,
.fork = pids_fork,
.free = pids_free,
.legacy_cftypes = pids_files,
.dfl_cftypes = pids_files,
@@ -1429,15 +1429,16 @@ static int fmeter_getrate(struct fmeter *fmp)
static struct cpuset *cpuset_attach_old_cs;

/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
static int cpuset_can_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
static int cpuset_can_attach(struct cgroup_taskset *tset)
{
struct cpuset *cs = css_cs(css);
struct cgroup_subsys_state *css;
struct cpuset *cs;
struct task_struct *task;
int ret;

/* used later by cpuset_attach() */
cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset));
cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
cs = css_cs(css);

mutex_lock(&cpuset_mutex);
@@ -1447,7 +1448,7 @@ static int cpuset_can_attach(struct cgroup_subsys_state *css,
(cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
goto out_unlock;

cgroup_taskset_for_each(task, tset) {
cgroup_taskset_for_each(task, css, tset) {
ret = task_can_attach(task, cs->cpus_allowed);
if (ret)
goto out_unlock;
@@ -1467,9 +1468,14 @@ out_unlock:
return ret;
}

static void cpuset_cancel_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
static void cpuset_cancel_attach(struct cgroup_taskset *tset)
{
struct cgroup_subsys_state *css;
struct cpuset *cs;

cgroup_taskset_first(tset, &css);
cs = css_cs(css);

mutex_lock(&cpuset_mutex);
css_cs(css)->attach_in_progress--;
mutex_unlock(&cpuset_mutex);
@@ -1482,16 +1488,19 @@ static void cpuset_cancel_attach(struct cgroup_subsys_state *css,
*/
static cpumask_var_t cpus_attach;

static void cpuset_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
static void cpuset_attach(struct cgroup_taskset *tset)
{
/* static buf protected by cpuset_mutex */
static nodemask_t cpuset_attach_nodemask_to;
struct task_struct *task;
struct task_struct *leader;
struct cpuset *cs = css_cs(css);
struct cgroup_subsys_state *css;
struct cpuset *cs;
struct cpuset *oldcs = cpuset_attach_old_cs;

cgroup_taskset_first(tset, &css);
cs = css_cs(css);

mutex_lock(&cpuset_mutex);

/* prepare for attach */
@@ -1502,7 +1511,7 @@ static void cpuset_attach(struct cgroup_subsys_state *css,

guarantee_online_mems(cs, &cpuset_attach_nodemask_to);

cgroup_taskset_for_each(task, tset) {
cgroup_taskset_for_each(task, css, tset) {
/*
* can_attach beforehand should guarantee that this doesn't
* fail. TODO: have a better way to handle failure here
@@ -1518,7 +1527,7 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
* sleep and should be moved outside migration path proper.
*/
cpuset_attach_nodemask_to = cs->effective_mems;
cgroup_taskset_for_each_leader(leader, tset) {
cgroup_taskset_for_each_leader(leader, css, tset) {
struct mm_struct *mm = get_task_mm(leader);

if (mm) {
@@ -3,7 +3,7 @@
*
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
* Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
*
* For licensing details see kernel-base/COPYING
@@ -3,7 +3,7 @@
*
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
* Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
*
* For licensing details see kernel-base/COPYING
@@ -435,7 +435,7 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
if (!is_cgroup_event(event))
return;

cgrp = perf_cgroup_from_task(current);
cgrp = perf_cgroup_from_task(current, event->ctx);
/*
* Do not update time when cgroup is not active
*/
@@ -458,7 +458,7 @@ perf_cgroup_set_timestamp(struct task_struct *task,
if (!task || !ctx->nr_cgroups)
return;

cgrp = perf_cgroup_from_task(task);
cgrp = perf_cgroup_from_task(task, ctx);
info = this_cpu_ptr(cgrp->info);
info->timestamp = ctx->timestamp;
}
@@ -489,7 +489,6 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
* we reschedule only in the presence of cgroup
* constrained events.
*/
rcu_read_lock();

list_for_each_entry_rcu(pmu, &pmus, entry) {
cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
@@ -522,8 +521,10 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
* set cgrp before ctxsw in to allow
* event_filter_match() to not have to pass
* task around
* we pass the cpuctx->ctx to perf_cgroup_from_task()
* because cgorup events are only per-cpu
*/
cpuctx->cgrp = perf_cgroup_from_task(task);
cpuctx->cgrp = perf_cgroup_from_task(task, &cpuctx->ctx);
cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
}
perf_pmu_enable(cpuctx->ctx.pmu);
@@ -531,8 +532,6 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
}
}

rcu_read_unlock();

local_irq_restore(flags);
}
@@ -542,17 +541,20 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
struct perf_cgroup *cgrp1;
struct perf_cgroup *cgrp2 = NULL;

rcu_read_lock();
/*
* we come here when we know perf_cgroup_events > 0
* we do not need to pass the ctx here because we know
* we are holding the rcu lock
*/
cgrp1 = perf_cgroup_from_task(task);
cgrp1 = perf_cgroup_from_task(task, NULL);

/*
* next is NULL when called from perf_event_enable_on_exec()
* that will systematically cause a cgroup_switch()
*/
if (next)
cgrp2 = perf_cgroup_from_task(next);
cgrp2 = perf_cgroup_from_task(next, NULL);

/*
* only schedule out current cgroup events if we know
@@ -561,6 +563,8 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
*/
if (cgrp1 != cgrp2)
perf_cgroup_switch(task, PERF_CGROUP_SWOUT);

rcu_read_unlock();
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
@@ -569,13 +573,16 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
struct perf_cgroup *cgrp1;
struct perf_cgroup *cgrp2 = NULL;

rcu_read_lock();
/*
* we come here when we know perf_cgroup_events > 0
* we do not need to pass the ctx here because we know
* we are holding the rcu lock
*/
cgrp1 = perf_cgroup_from_task(task);
cgrp1 = perf_cgroup_from_task(task, NULL);

/* prev can never be NULL */
cgrp2 = perf_cgroup_from_task(prev);
cgrp2 = perf_cgroup_from_task(prev, NULL);

/*
* only need to schedule in cgroup events if we are changing
@@ -584,6 +591,8 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
*/
if (cgrp1 != cgrp2)
perf_cgroup_switch(task, PERF_CGROUP_SWIN);

rcu_read_unlock();
}

static inline int perf_cgroup_connect(int fd, struct perf_event *event,
@@ -4216,7 +4225,14 @@ retry:
goto retry;
}

__perf_event_period(&pe);
if (event->attr.freq) {
event->attr.sample_freq = value;
} else {
event->attr.sample_period = value;
event->hw.sample_period = value;
}

local64_set(&event->hw.period_left, 0);
raw_spin_unlock_irq(&ctx->lock);

return 0;
@@ -5666,6 +5682,17 @@ perf_event_aux_ctx(struct perf_event_context *ctx,
}
}

static void
perf_event_aux_task_ctx(perf_event_aux_output_cb output, void *data,
struct perf_event_context *task_ctx)
{
rcu_read_lock();
preempt_disable();
perf_event_aux_ctx(task_ctx, output, data);
preempt_enable();
rcu_read_unlock();
}

static void
perf_event_aux(perf_event_aux_output_cb output, void *data,
struct perf_event_context *task_ctx)
@@ -5675,14 +5702,23 @@ perf_event_aux(perf_event_aux_output_cb output, void *data,
struct pmu *pmu;
int ctxn;

/*
* If we have task_ctx != NULL we only notify
* the task context itself. The task_ctx is set
* only for EXIT events before releasing task
* context.
*/
if (task_ctx) {
perf_event_aux_task_ctx(output, data, task_ctx);
return;
}

rcu_read_lock();
list_for_each_entry_rcu(pmu, &pmus, entry) {
cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
if (cpuctx->unique_pmu != pmu)
goto next;
perf_event_aux_ctx(&cpuctx->ctx, output, data);
if (task_ctx)
goto next;
ctxn = pmu->task_ctx_nr;
if (ctxn < 0)
goto next;
@@ -5692,12 +5728,6 @@ perf_event_aux(perf_event_aux_output_cb output, void *data,
next:
put_cpu_ptr(pmu->pmu_cpu_context);
}

if (task_ctx) {
preempt_disable();
perf_event_aux_ctx(task_ctx, output, data);
preempt_enable();
}
rcu_read_unlock();
}
@@ -8787,10 +8817,8 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
struct perf_event_context *child_ctx, *clone_ctx = NULL;
unsigned long flags;

if (likely(!child->perf_event_ctxp[ctxn])) {
perf_event_task(child, NULL, 0);
if (likely(!child->perf_event_ctxp[ctxn]))
return;
}

local_irq_save(flags);
/*
@@ -8874,6 +8902,14 @@ void perf_event_exit_task(struct task_struct *child)

for_each_task_context_nr(ctxn)
perf_event_exit_task_context(child, ctxn);

/*
* The perf_event_exit_task_context calls perf_event_task
* with child's task_ctx, which generates EXIT events for
* child contexts and sets child->perf_event_ctxp[] to NULL.
* At this point we need to send EXIT events to cpu contexts.
*/
perf_event_task(child, NULL, 0);
}

static void perf_free_event(struct perf_event *event,
@@ -9452,16 +9488,18 @@ static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
static int __perf_cgroup_move(void *info)
{
struct task_struct *task = info;
rcu_read_lock();
perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
rcu_read_unlock();
return 0;
}

static void perf_cgroup_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
static void perf_cgroup_attach(struct cgroup_taskset *tset)
{
struct task_struct *task;
struct cgroup_subsys_state *css;

cgroup_taskset_for_each(task, tset)
cgroup_taskset_for_each(task, css, tset)
task_function_call(task, __perf_cgroup_move, task);
}
@@ -3,7 +3,7 @@
*
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
* Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
*
* For licensing details see kernel-base/COPYING
@@ -19,7 +19,7 @@
* Authors:
* Srikar Dronamraju
* Jim Keniston
* Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
*/

#include <linux/kernel.h>
@@ -1368,8 +1368,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->real_start_time = ktime_get_boot_ns();
p->io_context = NULL;
p->audit_context = NULL;
if (clone_flags & CLONE_THREAD)
threadgroup_change_begin(current);
threadgroup_change_begin(current);
cgroup_fork(p);
#ifdef CONFIG_NUMA
p->mempolicy = mpol_dup(p->mempolicy);
@@ -1610,8 +1609,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,

proc_fork_connector(p);
cgroup_post_fork(p, cgrp_ss_priv);
if (clone_flags & CLONE_THREAD)
threadgroup_change_end(current);
threadgroup_change_end(current);
perf_event_fork(p);

trace_task_newtask(p, clone_flags);
@@ -1652,8 +1650,7 @@ bad_fork_cleanup_policy:
mpol_put(p->mempolicy);
bad_fork_cleanup_threadgroup_lock:
#endif
if (clone_flags & CLONE_THREAD)
threadgroup_change_end(current);
threadgroup_change_end(current);
delayacct_tsk_free(p);
bad_fork_cleanup_count:
atomic_dec(&p->cred->user->processes);
@@ -1,5 +1,5 @@
/*
* Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
*
* Provides a framework for enqueueing and running callbacks from hardirq
* context. The enqueueing is NMI-safe.
@@ -2,7 +2,7 @@
* jump label support
*
* Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
* Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2011 Peter Zijlstra
*
*/
#include <linux/memory.h>
@@ -6,7 +6,7 @@
* Started by Ingo Molnar:
*
* Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*
* this code maps all the lock dependencies as they occur in a live kernel
* and will warn about the following classes of locking bugs:
@@ -6,7 +6,7 @@
* Started by Ingo Molnar:
*
* Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*
* Code for /proc/lockdep and /proc/lockdep_stats:
*
@@ -93,10 +93,12 @@ bool osq_lock(struct optimistic_spin_queue *lock)
node->cpu = curr;

/*
* ACQUIRE semantics, pairs with corresponding RELEASE
* in unlock() uncontended, or fastpath.
* We need both ACQUIRE (pairs with corresponding RELEASE in
* unlock() uncontended, or fastpath) and RELEASE (to publish
* the node fields we just initialised) semantics when updating
* the lock tail.
*/
old = atomic_xchg_acquire(&lock->tail, curr);
old = atomic_xchg(&lock->tail, curr);
if (old == OSQ_UNLOCKED_VAL)
return true;
@@ -1,7 +1,7 @@
/*
* sched_clock for unstable cpu clocks
*
* Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra
*
* Updates and enhancements:
* Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
@@ -1946,6 +1946,25 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
goto stat;

#ifdef CONFIG_SMP
/*
* Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
* possible to, falsely, observe p->on_cpu == 0.
*
* One must be running (->on_cpu == 1) in order to remove oneself
* from the runqueue.
*
* [S] ->on_cpu = 1; [L] ->on_rq
* UNLOCK rq->lock
* RMB
* LOCK rq->lock
* [S] ->on_rq = 0; [L] ->on_cpu
*
* Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
* from the consecutive calls to schedule(); the first switching to our
* task, the second putting it to sleep.
*/
smp_rmb();

/*
* If the owning (remote) cpu is still in the middle of schedule() with
* this task as prev, wait until its done referencing the task.
@@ -1953,7 +1972,13 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
while (p->on_cpu)
cpu_relax();
/*
* Pairs with the smp_wmb() in finish_lock_switch().
* Combined with the control dependency above, we have an effective
* smp_load_acquire() without the need for full barriers.
*
* Pairs with the smp_store_release() in finish_lock_switch().
*
* This ensures that tasks getting woken will be fully ordered against
* their previous state and preserve Program Order.
*/
smp_rmb();
@@ -2039,7 +2064,6 @@ out:
*/
int wake_up_process(struct task_struct *p)
{
WARN_ON(task_is_stopped_or_traced(p));
return try_to_wake_up(p, TASK_NORMAL, 0);
}
EXPORT_SYMBOL(wake_up_process);
@@ -5847,13 +5871,13 @@ static int init_rootdomain(struct root_domain *rd)
{
memset(rd, 0, sizeof(*rd));

if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
goto out;
if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
goto free_span;
if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
goto free_online;
if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
goto free_dlo_mask;

init_dl_bw(&rd->dl_bw);
@@ -8217,12 +8241,12 @@ static void cpu_cgroup_fork(struct task_struct *task, void *private)
sched_move_task(task);
}

static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
{
struct task_struct *task;
struct cgroup_subsys_state *css;

cgroup_taskset_for_each(task, tset) {
cgroup_taskset_for_each(task, css, tset) {
#ifdef CONFIG_RT_GROUP_SCHED
if (!sched_rt_can_attach(css_tg(css), task))
return -EINVAL;
@@ -8235,12 +8259,12 @@ static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
return 0;
}

static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
static void cpu_cgroup_attach(struct cgroup_taskset *tset)
{
struct task_struct *task;
struct cgroup_subsys_state *css;

cgroup_taskset_for_each(task, tset)
cgroup_taskset_for_each(task, css, tset)
sched_move_task(task);
}
@@ -788,6 +788,9 @@ cputime_t task_gtime(struct task_struct *t)
unsigned int seq;
cputime_t gtime;

if (!context_tracking_is_enabled())
return t->gtime;

do {
seq = read_seqbegin(&t->vtime_seqlock);
@@ -17,7 +17,7 @@
* Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
*
* Adaptive scheduling granularity, math enhancements by Peter Zijlstra
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*/

#include <linux/latencytop.h>
@@ -64,7 +64,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
raw_spin_unlock(&rt_b->rt_runtime_lock);
}

#ifdef CONFIG_SMP
#if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI)
static void push_irq_work_func(struct irq_work *work);
#endif
@@ -1073,6 +1073,9 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
* We must ensure this doesn't happen until the switch is completely
* finished.
*
* In particular, the load of prev->state in finish_task_switch() must
* happen before this.
*
* Pairs with the control dependency and rmb in try_to_wake_up().
*/
smp_store_release(&prev->on_cpu, 0);
@@ -392,7 +392,7 @@ __wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
do {
prepare_to_wait(wq, &q->wait, mode);
if (test_bit(q->key.bit_nr, q->key.flags))
ret = (*action)(&q->key);
ret = (*action)(&q->key, mode);
} while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
finish_wait(wq, &q->wait);
return ret;
@@ -431,7 +431,7 @@ __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
prepare_to_wait_exclusive(wq, &q->wait, mode);
if (!test_bit(q->key.bit_nr, q->key.flags))
continue;
ret = action(&q->key);
ret = action(&q->key, mode);
if (!ret)
continue;
abort_exclusive_wait(wq, &q->wait, mode, &q->key);
@@ -581,44 +581,44 @@ void wake_up_atomic_t(atomic_t *p)
}
EXPORT_SYMBOL(wake_up_atomic_t);

__sched int bit_wait(struct wait_bit_key *word)
__sched int bit_wait(struct wait_bit_key *word, int mode)
{
if (signal_pending_state(current->state, current))
return 1;
schedule();
if (signal_pending_state(mode, current))
return -EINTR;
return 0;
}
EXPORT_SYMBOL(bit_wait);

__sched int bit_wait_io(struct wait_bit_key *word)
__sched int bit_wait_io(struct wait_bit_key *word, int mode)
{
if (signal_pending_state(current->state, current))
return 1;
io_schedule();
if (signal_pending_state(mode, current))
return -EINTR;
return 0;
}
EXPORT_SYMBOL(bit_wait_io);

__sched int bit_wait_timeout(struct wait_bit_key *word)
__sched int bit_wait_timeout(struct wait_bit_key *word, int mode)
{
unsigned long now = READ_ONCE(jiffies);
if (signal_pending_state(current->state, current))
return 1;
if (time_after_eq(now, word->timeout))
return -EAGAIN;
schedule_timeout(word->timeout - now);
if (signal_pending_state(mode, current))
return -EINTR;
return 0;
}
EXPORT_SYMBOL_GPL(bit_wait_timeout);

__sched int bit_wait_io_timeout(struct wait_bit_key *word)
__sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode)
{
unsigned long now = READ_ONCE(jiffies);
if (signal_pending_state(current->state, current))
return 1;
if (time_after_eq(now, word->timeout))
return -EAGAIN;
io_schedule_timeout(word->timeout - now);
if (signal_pending_state(mode, current))
return -EINTR;
return 0;
}
EXPORT_SYMBOL_GPL(bit_wait_io_timeout);
@@ -531,7 +531,7 @@ static int __init cpu_stop_init(void)
}
early_initcall(cpu_stop_init);

#ifdef CONFIG_STOP_MACHINE
#if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)

static int __stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
@@ -631,4 +631,4 @@ int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
return ret ?: done.ret;
}

#endif /* CONFIG_STOP_MACHINE */
#endif /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
@@ -1,7 +1,7 @@
/*
* trace event based perf event profiling/tracing
*
* Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
* Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
*/